diff --git a/.ipynb_checkpoints/Training-checkpoint.ipynb b/.ipynb_checkpoints/Training-checkpoint.ipynb new file mode 100644 index 00000000..363fcab7 --- /dev/null +++ b/.ipynb_checkpoints/Training-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/.ipynb_checkpoints/TrainingModel-checkpoint.ipynb b/.ipynb_checkpoints/TrainingModel-checkpoint.ipynb new file mode 100644 index 00000000..3478e26b --- /dev/null +++ b/.ipynb_checkpoints/TrainingModel-checkpoint.ipynb @@ -0,0 +1,1217 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 8, + "id": "bd4924a2-c787-4ed6-8fbb-191ea5332ebc", + "metadata": {}, + "outputs": [], + "source": [ + "WORKSPACE_PATH = 'Tensorflow/workspace'\n", + "SCRIPTS_PATH = 'Tensorflow/scripts'\n", + "APIMODEL_PATH = 'Tensorflow/models'\n", + "ANNOTATION_PATH = WORKSPACE_PATH+'/annotations'\n", + "IMAGE_PATH = WORKSPACE_PATH+'/images'\n", + "MODEL_PATH = WORKSPACE_PATH+'/models'\n", + "PRETRAINED_MODEL_PATH = WORKSPACE_PATH+'/pre-trained-models'\n", + "CONFIG_PATH = MODEL_PATH+'/my_ssd_mobnet/pipeline.config'\n", + "CHECKPOINT_PATH = MODEL_PATH+'/my_ssd_mobnet/'" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "4d1618d1-80db-4eed-9349-2330c2cec6ad", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "fatal: destination path 'models' already exists and is not an empty directory.\n" + ] + } + ], + "source": [ + "!cd Tensorflow && git clone https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "ffe4f27a-1e01-46ff-97fb-31cc5ad5dcfd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34mssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8\u001b[m\u001b[m\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n" + ] + } + ], + "source": [ + "!ls Tensorflow/workspace/pre-trained-models\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "6ed340c0-ae12-4959-95eb-3118530eeb0a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34mcheckpoint\u001b[m\u001b[m pipeline.config \u001b[34msaved_model\u001b[m\u001b[m\n" + ] + } + ], + "source": [ + "!ls Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "9a2527f9-716b-4b75-a8ba-af66434d5283", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "pipeline.config\n" + ] + } + ], + "source": [ + "CUSTOM_MODEL_NAME='my_ssd_mobnet'\n", + "PRETRAINED_MODEL_PATH='Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'\n", + "MODEL_PATH='Tensorflow/workspace/models'\n", + "\n", + "!mkdir -p {MODEL_PATH}/{CUSTOM_MODEL_NAME}\n", + "!cp {PRETRAINED_MODEL_PATH}/pipeline.config {MODEL_PATH}/{CUSTOM_MODEL_NAME}\n", + "!ls {MODEL_PATH}/{CUSTOM_MODEL_NAME}\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "23f9d062-d8de-497a-a1c2-e084a3418c16", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34mannotations\u001b[m\u001b[m/ \u001b[34mmodels\u001b[m\u001b[m/\n", + "annotations\\label_map.pbtxt \u001b[34mpre-trained-models\u001b[m\u001b[m/\n", + "\u001b[34mimages\u001b[m\u001b[m/\n" + ] + } + ], + "source": [ + "ls Tensorflow/workspace\n" + ] + }, + { + "cell_type": 
"code", + "execution_count": 20, + "id": "6e585131-ccd2-45c9-9111-e21dc7030e33", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Copied: Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config\n" + ] + } + ], + "source": [ + "# Step 4 — copy the pretrained model's pipeline.config to your training folder\n", + "\n", + "CUSTOM_MODEL_NAME = 'my_ssd_mobnet'\n", + "MODEL_PATH = 'Tensorflow/workspace/models'\n", + "PRETRAINED_ROOT = 'Tensorflow/workspace/pre-trained-models' \n", + "\n", + "PRETRAINED_MODEL_DIR = f'{PRETRAINED_ROOT}/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'\n", + "\n", + "import os, shutil\n", + "os.makedirs(f'{MODEL_PATH}/{CUSTOM_MODEL_NAME}', exist_ok=True)\n", + "\n", + "# copy the config (overwrite if present)\n", + "src = f'{PRETRAINED_MODEL_DIR}/pipeline.config'\n", + "dst = f'{MODEL_PATH}/{CUSTOM_MODEL_NAME}/pipeline.config'\n", + "shutil.copyfile(src, dst)\n", + "\n", + "print('Copied:', dst)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "73b46bce-3907-4143-ae07-ce50b3c879dd", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import tensorflow as tf\n", + "from object_detection.utils import config_util\n", + "from object_detection.protos import pipeline_pb2\n", + "from google.protobuf import text_format" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "90aa6236-9e5b-42ae-bf93-7231d905c8e7", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install --quiet --upgrade \"protobuf==3.20.3\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "1072c13b-38d9-4de0-b2db-e29e07383ad2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: protobuf==3.20.3 in /opt/anaconda3/lib/python3.12/site-packages (3.20.3)\n", + "Requirement already satisfied: tensorflow-metadata<=1.13.1 in /opt/anaconda3/lib/python3.12/site-packages (1.13.1)\n", + "Requirement already satisfied: absl-py<2.0.0,>=0.9 in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-metadata<=1.13.1) (1.4.0)\n", + "Requirement already satisfied: googleapis-common-protos<2,>=1.52.0 in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-metadata<=1.13.1) (1.72.0)\n", + "Requirement already satisfied: protobuf<5,>=3.20.3 in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-metadata<=1.13.1) (3.20.3)\n", + "Requirement already satisfied: tensorflow-datasets<5.0.0 in /opt/anaconda3/lib/python3.12/site-packages (4.9.9)\n", + "Requirement already satisfied: absl-py in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (1.4.0)\n", + "Requirement already satisfied: dm-tree in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (0.1.9)\n", + "Requirement already satisfied: etils>=1.9.1 in /opt/anaconda3/lib/python3.12/site-packages (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= \"3.11\"->tensorflow-datasets<5.0.0) (1.13.0)\n", + "Requirement already satisfied: immutabledict in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (4.2.2)\n", + "Requirement already satisfied: numpy in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (1.26.4)\n", + "Requirement already satisfied: promise in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (2.3)\n", + "Requirement already satisfied: protobuf>=3.20 in 
/opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (3.20.3)\n", + "Requirement already satisfied: psutil in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (5.9.0)\n", + "Requirement already satisfied: pyarrow in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (16.1.0)\n", + "Requirement already satisfied: requests>=2.19.0 in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (2.32.5)\n", + "Requirement already satisfied: simple_parsing in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (0.1.7)\n", + "Requirement already satisfied: tensorflow-metadata in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (1.13.1)\n", + "Requirement already satisfied: termcolor in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (3.2.0)\n", + "Requirement already satisfied: toml in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (0.10.2)\n", + "Requirement already satisfied: tqdm in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (4.66.5)\n", + "Requirement already satisfied: wrapt in /opt/anaconda3/lib/python3.12/site-packages (from tensorflow-datasets<5.0.0) (1.14.1)\n", + "Requirement already satisfied: einops in /opt/anaconda3/lib/python3.12/site-packages (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= \"3.11\"->tensorflow-datasets<5.0.0) (0.8.1)\n", + "Requirement already satisfied: fsspec in /opt/anaconda3/lib/python3.12/site-packages (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= \"3.11\"->tensorflow-datasets<5.0.0) (2024.6.1)\n", + "Requirement already satisfied: importlib_resources in /opt/anaconda3/lib/python3.12/site-packages (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= \"3.11\"->tensorflow-datasets<5.0.0) (6.5.2)\n", + "Requirement already satisfied: typing_extensions in /opt/anaconda3/lib/python3.12/site-packages (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= \"3.11\"->tensorflow-datasets<5.0.0) (4.11.0)\n", + "Requirement already satisfied: zipp in /opt/anaconda3/lib/python3.12/site-packages (from etils[edc,enp,epath,epy,etree]>=1.9.1; python_version >= \"3.11\"->tensorflow-datasets<5.0.0) (3.17.0)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in /opt/anaconda3/lib/python3.12/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (3.3.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/anaconda3/lib/python3.12/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (3.7)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/anaconda3/lib/python3.12/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (2.2.3)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /opt/anaconda3/lib/python3.12/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (2025.1.31)\n", + "Requirement already satisfied: attrs>=18.2.0 in /opt/anaconda3/lib/python3.12/site-packages (from dm-tree->tensorflow-datasets<5.0.0) (23.1.0)\n", + "Requirement already satisfied: six in /opt/anaconda3/lib/python3.12/site-packages (from promise->tensorflow-datasets<5.0.0) (1.16.0)\n", + "Requirement already satisfied: docstring-parser<1.0,>=0.15 in /opt/anaconda3/lib/python3.12/site-packages (from simple_parsing->tensorflow-datasets<5.0.0) (0.17.0)\n", + "Requirement already satisfied: googleapis-common-protos<2,>=1.52.0 in 
/opt/anaconda3/lib/python3.12/site-packages (from tensorflow-metadata->tensorflow-datasets<5.0.0) (1.72.0)\n" + ] + } + ], + "source": [ + "!pip install --upgrade \"protobuf==3.20.3\"\n", + "!pip install --upgrade \"tensorflow-metadata<=1.13.1\"\n", + "!pip install --upgrade \"tensorflow-datasets<5.0.0\"\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "14df5a9a-e653-446a-87fc-9ce0ba13abc1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Kernel Python: /opt/anaconda3/envs/tfod2/bin/python\n" + ] + } + ], + "source": [ + "import sys\n", + "print(\"Kernel Python:\", sys.executable)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "f8f60537-7b66-46bc-9464-606850ac6505", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting protobuf==3.20.3\n", + " Using cached protobuf-3.20.3-py2.py3-none-any.whl.metadata (720 bytes)\n", + "Using cached protobuf-3.20.3-py2.py3-none-any.whl (162 kB)\n", + "Installing collected packages: protobuf\n", + " Attempting uninstall: protobuf\n", + " Found existing installation: protobuf 3.19.6\n", + " Uninstalling protobuf-3.19.6:\n", + " Successfully uninstalled protobuf-3.19.6\n", + "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", + "object-detection 0.1 requires tensorflow_io, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow~=2.19.0, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow-datasets, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow-hub>=0.6.0, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow-model-optimization>=0.4.1, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow-text~=2.19.0, which is not installed.\n", + "tensorboard 2.10.1 requires protobuf<3.20,>=3.9.2, but you have protobuf 3.20.3 which is incompatible.\n", + "tensorflow-macos 2.10.0 requires protobuf<3.20,>=3.9.2, but you have protobuf 3.20.3 which is incompatible.\u001b[0m\u001b[31m\n", + "\u001b[0mSuccessfully installed protobuf-3.20.3\n", + "Collecting tensorflow-metadata<=1.13.1\n", + " Using cached tensorflow_metadata-1.13.1-py3-none-any.whl.metadata (2.1 kB)\n", + "Requirement already satisfied: absl-py<2.0.0,>=0.9 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-metadata<=1.13.1) (1.4.0)\n", + "Requirement already satisfied: googleapis-common-protos<2,>=1.52.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-metadata<=1.13.1) (1.72.0)\n", + "Requirement already satisfied: protobuf<5,>=3.20.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-metadata<=1.13.1) (3.20.3)\n", + "Using cached tensorflow_metadata-1.13.1-py3-none-any.whl (28 kB)\n", + "Installing collected packages: tensorflow-metadata\n", + "Successfully installed tensorflow-metadata-1.13.1\n", + "Collecting tensorflow-datasets<5.0.0\n", + " Using cached tensorflow_datasets-4.9.9-py3-none-any.whl.metadata (11 kB)\n", + "Requirement already satisfied: absl-py in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (1.4.0)\n", + "Requirement already satisfied: dm-tree in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (0.1.9)\n", + "Requirement 
already satisfied: etils>=1.6.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets<5.0.0) (1.13.0)\n", + "Requirement already satisfied: immutabledict in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (4.2.2)\n", + "Requirement already satisfied: numpy in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (1.26.4)\n", + "Requirement already satisfied: promise in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (2.3)\n", + "Requirement already satisfied: protobuf>=3.20 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (3.20.3)\n", + "Requirement already satisfied: psutil in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (7.1.3)\n", + "Requirement already satisfied: pyarrow in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (18.1.0)\n", + "Requirement already satisfied: requests>=2.19.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (2.32.5)\n", + "Requirement already satisfied: simple_parsing in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (0.1.7)\n", + "Requirement already satisfied: tensorflow-metadata in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (1.13.1)\n", + "Requirement already satisfied: termcolor in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (3.2.0)\n", + "Requirement already satisfied: toml in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (0.10.2)\n", + "Requirement already satisfied: tqdm in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (4.67.1)\n", + "Requirement already satisfied: wrapt in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-datasets<5.0.0) (2.0.1)\n", + "Requirement already satisfied: einops in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets<5.0.0) (0.8.1)\n", + "Requirement already satisfied: typing_extensions in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets<5.0.0) (4.15.0)\n", + "Requirement already satisfied: fsspec in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets<5.0.0) (2025.10.0)\n", + "Requirement already satisfied: importlib_resources in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets<5.0.0) (6.5.2)\n", + "Requirement already satisfied: zipp in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from etils[edc,enp,epath,epy,etree]>=1.6.0; python_version < \"3.11\"->tensorflow-datasets<5.0.0) (3.23.0)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (3.4.4)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (3.11)\n", + 
"Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (2.5.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests>=2.19.0->tensorflow-datasets<5.0.0) (2025.10.5)\n", + "Requirement already satisfied: attrs>=18.2.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from dm-tree->tensorflow-datasets<5.0.0) (25.4.0)\n", + "Requirement already satisfied: six in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from promise->tensorflow-datasets<5.0.0) (1.17.0)\n", + "Requirement already satisfied: docstring-parser<1.0,>=0.15 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from simple_parsing->tensorflow-datasets<5.0.0) (0.17.0)\n", + "Requirement already satisfied: googleapis-common-protos<2,>=1.52.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-metadata->tensorflow-datasets<5.0.0) (1.72.0)\n", + "Using cached tensorflow_datasets-4.9.9-py3-none-any.whl (5.3 MB)\n", + "Installing collected packages: tensorflow-datasets\n", + "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", + "tf-models-official 2.19.1 requires tensorflow~=2.19.0, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow-hub>=0.6.0, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow-model-optimization>=0.4.1, which is not installed.\n", + "tf-models-official 2.19.1 requires tensorflow-text~=2.19.0, which is not installed.\u001b[0m\u001b[31m\n", + "\u001b[0mSuccessfully installed tensorflow-datasets-4.9.9\n" + ] + } + ], + "source": [ + "import sys\n", + "!{sys.executable} -m pip install --upgrade \"protobuf==3.20.3\"\n", + "!{sys.executable} -m pip install --upgrade \"tensorflow-metadata<=1.13.1\"\n", + "# optional but fine to keep:\n", + "!{sys.executable} -m pip install --upgrade \"tensorflow-datasets<5.0.0\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "bb08a0f8-85b3-406b-8bec-79ed00899335", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Kernel: /opt/anaconda3/envs/tfod2/bin/python\n", + "protobuf: 3.20.3\n", + "TF: 2.10.0 | imports OK\n" + ] + } + ], + "source": [ + "import sys, google.protobuf\n", + "print(\"Kernel:\", sys.executable)\n", + "print(\"protobuf:\", google.protobuf.__version__) # should be 3.20.3\n", + "\n", + "import tensorflow as tf\n", + "from object_detection.utils import config_util\n", + "from object_detection.protos import pipeline_pb2\n", + "from google.protobuf import text_format\n", + "print(\"TF:\", tf.__version__, \" | imports OK\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "7beca04c-97e4-4a1e-a3fc-9edaa017dadd", + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow as tf\n", + "from object_detection.utils import config_util\n", + "from object_detection.protos import pipeline_pb2\n", + "from google.protobuf import text_format" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "f702b33d-7ca0-4fc9-a6c4-ba6b2377a171", + "metadata": {}, + "outputs": [], + "source": [ + "CONFIG_PATH = MODEL_PATH+'/'+CUSTOM_MODEL_NAME+'/pipeline.config'" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "fa1dad82-9a0a-4452-95dc-495abd4c930a", + 
"metadata": {}, + "outputs": [], + "source": [ + "config = config_util.get_configs_from_pipeline_file(CONFIG_PATH)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "e75285f2-4358-4a11-91be-92b4690f28d9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'model': ssd {\n", + " num_classes: 90\n", + " image_resizer {\n", + " fixed_shape_resizer {\n", + " height: 320\n", + " width: 320\n", + " }\n", + " }\n", + " feature_extractor {\n", + " type: \"ssd_mobilenet_v2_fpn_keras\"\n", + " depth_multiplier: 1.0\n", + " min_depth: 16\n", + " conv_hyperparams {\n", + " regularizer {\n", + " l2_regularizer {\n", + " weight: 4e-05\n", + " }\n", + " }\n", + " initializer {\n", + " random_normal_initializer {\n", + " mean: 0.0\n", + " stddev: 0.01\n", + " }\n", + " }\n", + " activation: RELU_6\n", + " batch_norm {\n", + " decay: 0.997\n", + " scale: true\n", + " epsilon: 0.001\n", + " }\n", + " }\n", + " use_depthwise: true\n", + " override_base_feature_extractor_hyperparams: true\n", + " fpn {\n", + " min_level: 3\n", + " max_level: 7\n", + " additional_layer_depth: 128\n", + " }\n", + " }\n", + " box_coder {\n", + " faster_rcnn_box_coder {\n", + " y_scale: 10.0\n", + " x_scale: 10.0\n", + " height_scale: 5.0\n", + " width_scale: 5.0\n", + " }\n", + " }\n", + " matcher {\n", + " argmax_matcher {\n", + " matched_threshold: 0.5\n", + " unmatched_threshold: 0.5\n", + " ignore_thresholds: false\n", + " negatives_lower_than_unmatched: true\n", + " force_match_for_each_row: true\n", + " use_matmul_gather: true\n", + " }\n", + " }\n", + " similarity_calculator {\n", + " iou_similarity {\n", + " }\n", + " }\n", + " box_predictor {\n", + " weight_shared_convolutional_box_predictor {\n", + " conv_hyperparams {\n", + " regularizer {\n", + " l2_regularizer {\n", + " weight: 4e-05\n", + " }\n", + " }\n", + " initializer {\n", + " random_normal_initializer {\n", + " mean: 0.0\n", + " stddev: 0.01\n", + " }\n", + " }\n", + " activation: RELU_6\n", + " batch_norm {\n", + " decay: 0.997\n", + " scale: true\n", + " epsilon: 0.001\n", + " }\n", + " }\n", + " depth: 128\n", + " num_layers_before_predictor: 4\n", + " kernel_size: 3\n", + " class_prediction_bias_init: -4.6\n", + " share_prediction_tower: true\n", + " use_depthwise: true\n", + " }\n", + " }\n", + " anchor_generator {\n", + " multiscale_anchor_generator {\n", + " min_level: 3\n", + " max_level: 7\n", + " anchor_scale: 4.0\n", + " aspect_ratios: 1.0\n", + " aspect_ratios: 2.0\n", + " aspect_ratios: 0.5\n", + " scales_per_octave: 2\n", + " }\n", + " }\n", + " post_processing {\n", + " batch_non_max_suppression {\n", + " score_threshold: 1e-08\n", + " iou_threshold: 0.6\n", + " max_detections_per_class: 100\n", + " max_total_detections: 100\n", + " use_static_shapes: false\n", + " }\n", + " score_converter: SIGMOID\n", + " }\n", + " normalize_loss_by_num_matches: true\n", + " loss {\n", + " localization_loss {\n", + " weighted_smooth_l1 {\n", + " }\n", + " }\n", + " classification_loss {\n", + " weighted_sigmoid_focal {\n", + " gamma: 2.0\n", + " alpha: 0.25\n", + " }\n", + " }\n", + " classification_weight: 1.0\n", + " localization_weight: 1.0\n", + " }\n", + " encode_background_as_zeros: true\n", + " normalize_loc_loss_by_codesize: true\n", + " inplace_batchnorm_update: true\n", + " freeze_batchnorm: false\n", + " },\n", + " 'train_config': batch_size: 128\n", + " data_augmentation_options {\n", + " random_horizontal_flip {\n", + " }\n", + " }\n", + " data_augmentation_options {\n", + " random_crop_image {\n", + " 
min_object_covered: 0.0\n", + " min_aspect_ratio: 0.75\n", + " max_aspect_ratio: 3.0\n", + " min_area: 0.75\n", + " max_area: 1.0\n", + " overlap_thresh: 0.0\n", + " }\n", + " }\n", + " sync_replicas: true\n", + " optimizer {\n", + " momentum_optimizer {\n", + " learning_rate {\n", + " cosine_decay_learning_rate {\n", + " learning_rate_base: 0.08\n", + " total_steps: 50000\n", + " warmup_learning_rate: 0.026666\n", + " warmup_steps: 1000\n", + " }\n", + " }\n", + " momentum_optimizer_value: 0.9\n", + " }\n", + " use_moving_average: false\n", + " }\n", + " fine_tune_checkpoint: \"PATH_TO_BE_CONFIGURED\"\n", + " num_steps: 50000\n", + " startup_delay_steps: 0.0\n", + " replicas_to_aggregate: 8\n", + " max_number_of_boxes: 100\n", + " unpad_groundtruth_tensors: false\n", + " fine_tune_checkpoint_type: \"classification\"\n", + " fine_tune_checkpoint_version: V2,\n", + " 'train_input_config': label_map_path: \"PATH_TO_BE_CONFIGURED\"\n", + " tf_record_input_reader {\n", + " input_path: \"PATH_TO_BE_CONFIGURED\"\n", + " },\n", + " 'eval_config': metrics_set: \"coco_detection_metrics\"\n", + " use_moving_averages: false,\n", + " 'eval_input_configs': [label_map_path: \"PATH_TO_BE_CONFIGURED\"\n", + " shuffle: false\n", + " num_epochs: 1\n", + " tf_record_input_reader {\n", + " input_path: \"PATH_TO_BE_CONFIGURED\"\n", + " }\n", + " ],\n", + " 'eval_input_config': label_map_path: \"PATH_TO_BE_CONFIGURED\"\n", + " shuffle: false\n", + " num_epochs: 1\n", + " tf_record_input_reader {\n", + " input_path: \"PATH_TO_BE_CONFIGURED\"\n", + " }}" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "config" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "0388764e-757e-4daa-bff7-ceb64451cffe", + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n", + "with tf.io.gfile.GFile(CONFIG_PATH, \"r\") as f: \n", + " proto_str = f.read() \n", + " text_format.Merge(proto_str, pipeline_config) " + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "b5c0f29c-efcb-48f2-83bc-3986d8e4a122", + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_config.model.ssd.num_classes = 2\n", + "pipeline_config.train_config.batch_size = 4\n", + "pipeline_config.train_config.fine_tune_checkpoint = PRETRAINED_MODEL_PATH+'/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/ckpt-0'\n", + "pipeline_config.train_config.fine_tune_checkpoint_type = \"detection\"\n", + "pipeline_config.train_input_reader.label_map_path= ANNOTATION_PATH + '/label_map.pbtxt'\n", + "pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/train.record']\n", + "pipeline_config.eval_input_reader[0].label_map_path = ANNOTATION_PATH + '/label_map.pbtxt'\n", + "pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/test.record']" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "60751988-beee-4510-85dc-4cad9502907f", + "metadata": {}, + "outputs": [], + "source": [ + "config_text = text_format.MessageToString(pipeline_config) \n", + "with tf.io.gfile.GFile(CONFIG_PATH, \"wb\") as f: \n", + " f.write(config_text) " + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "id": "023f6a06-6470-4072-a35c-826eb428fe3f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "python Tensorflow/models/research/object_detection/model_main_tf2.py 
--model_dir=Tensorflow/workspace/models/my_ssd_mobnet --pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config --num_train_steps=5000\n" + ] + } + ], + "source": [ + "print(\"\"\"python {}/research/object_detection/model_main_tf2.py --model_dir={}/{} --pipeline_config_path={}/{}/pipeline.config --num_train_steps=5000\"\"\".format(APIMODEL_PATH, MODEL_PATH,CUSTOM_MODEL_NAME,MODEL_PATH,CUSTOM_MODEL_NAME))" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "f0ecced1-75ee-4bc1-b2aa-68a553d0d900", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Traceback (most recent call last):\n", + " File \"/Users/venkateshms/Documents/RealTimeObjectDetection/Tensorflow/models/research/object_detection/model_main_tf2.py\", line 31, in \n", + " from object_detection import model_lib_v2\n", + " File \"/opt/anaconda3/lib/python3.12/site-packages/object_detection/model_lib_v2.py\", line 30, in \n", + " from object_detection import inputs\n", + " File \"/opt/anaconda3/lib/python3.12/site-packages/object_detection/inputs.py\", line 24, in \n", + " from tensorflow.compat.v1 import estimator as tf_estimator\n", + "ImportError: cannot import name 'estimator' from 'tensorflow.compat.v1' (/opt/anaconda3/lib/python3.12/site-packages/tensorflow/_api/v2/compat/v1/__init__.py)\n" + ] + } + ], + "source": [ + "!python Tensorflow/models/research/object_detection/model_main_tf2.py \\\n", + " --model_dir=Tensorflow/workspace/models/my_ssd_mobnet \\\n", + " --pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config \\\n", + " --num_train_steps=5000\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "1df131cf-5e4b-4929-bc4a-73c240f5d4b5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting tensorflow-io\n", + " Using cached tensorflow_io-0.37.1-cp310-cp310-macosx_12_0_arm64.whl.metadata (14 kB)\n", + "Collecting tensorflow-io-gcs-filesystem==0.37.1 (from tensorflow-io)\n", + " Using cached tensorflow_io_gcs_filesystem-0.37.1-cp310-cp310-macosx_12_0_arm64.whl.metadata (14 kB)\n", + "Using cached tensorflow_io-0.37.1-cp310-cp310-macosx_12_0_arm64.whl (31.8 MB)\n", + "Using cached tensorflow_io_gcs_filesystem-0.37.1-cp310-cp310-macosx_12_0_arm64.whl (3.5 MB)\n", + "Installing collected packages: tensorflow-io-gcs-filesystem, tensorflow-io\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2/2\u001b[0m [tensorflow-io]0m [tensorflow-io]\n", + "\u001b[1A\u001b[2KSuccessfully installed tensorflow-io-0.37.1 tensorflow-io-gcs-filesystem-0.37.1\n" + ] + } + ], + "source": [ + "!pip install tensorflow-io\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ab3b8de2-d614-47d3-8e53-f7234e53967a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/google/api_core/_python_version_support.py:266: FutureWarning: You are using a Python version (3.10.19) which Google will stop supporting in new releases of google.api_core once it reaches its end of life (2026-10-04). 
Please upgrade to the latest Python version, or at least Python 3.11, to continue receiving updates for google.api_core past that date.\n", + " warnings.warn(message, FutureWarning)\n", + "/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']\n", + "caused by: [\"dlopen(/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so, 0x0006): symbol not found in flat namespace '__ZN3tsl2io7DirnameENSt3__117basic_string_viewIcNS1_11char_traitsIcEEEE'\"]\n", + " warnings.warn(f\"unable to load libtensorflow_io_plugins.so: {e}\")\n", + "/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io.so']\n", + "caused by: [\"dlopen(/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io.so, 0x0006): symbol not found in flat namespace '__ZN3tsl2io19BufferedInputStream10ReadNBytesExPNS_7tstringE'\"]\n", + " warnings.warn(f\"file system plugins are not loaded: {e}\")\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/runpy.py\", line 196, in _run_module_as_main\n", + " return _run_code(code, main_globals, None,\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/runpy.py\", line 86, in _run_code\n", + " exec(code, run_globals)\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/object_detection/model_main_tf2.py\", line 31, in \n", + " from object_detection import model_lib_v2\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/object_detection/model_lib_v2.py\", line 30, in \n", + " from object_detection import inputs\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/object_detection/inputs.py\", line 27, in \n", + " from object_detection.builders import model_builder\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/object_detection/builders/model_builder.py\", line 70, in \n", + " from object_detection.models import ssd_efficientnet_bifpn_feature_extractor as ssd_efficientnet_bifpn\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py\", line 35, in \n", + " from official.legacy.image_classification.efficientnet import efficientnet_model\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/official/legacy/image_classification/efficientnet/efficientnet_model.py\", line 30, in \n", + " import tensorflow as tf, tf_keras\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/__init__.py\", line 3, in \n", + " from tf_keras import __internal__\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/__internal__/__init__.py\", line 3, in \n", + " from tf_keras.__internal__ import backend\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/__internal__/backend/__init__.py\", line 3, in \n", + " from tf_keras.src.backend import _initialize_variables as initialize_variables\n", + " File 
\"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/__init__.py\", line 21, in \n", + " from tf_keras.src import applications\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/applications/__init__.py\", line 18, in \n", + " from tf_keras.src.applications.convnext import ConvNeXtBase\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/applications/convnext.py\", line 28, in \n", + " from tf_keras.src import backend\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/backend.py\", line 35, in \n", + " from tf_keras.src.engine import keras_tensor\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/engine/keras_tensor.py\", line 19, in \n", + " from tf_keras.src.utils import object_identity\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/utils/__init__.py\", line 53, in \n", + " from tf_keras.src.utils.feature_space import FeatureSpace\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/utils/feature_space.py\", line 20, in \n", + " from tf_keras.src.engine import base_layer\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/engine/base_layer.py\", line 35, in \n", + " from tf_keras.src.dtensor import lazy_variable\n", + " File \"/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tf_keras/src/dtensor/lazy_variable.py\", line 23, in \n", + " from tensorflow.python.framework import tensor\n", + "ImportError: cannot import name 'tensor' from 'tensorflow.python.framework' (/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow/python/framework/__init__.py)\n" + ] + } + ], + "source": [ + "!python -m object_detection.model_main_tf2 \\\n", + " --model_dir=Tensorflow/workspace/models/my_ssd_mobnet \\\n", + " --pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config \\\n", + " --num_train_steps=5000\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc0dd285-4b45-43f2-bf66-e20b292edc04", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "5868324a-5bfb-4c27-ad2e-4e24c4ca9907", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: Skipping tensorflow as it is not installed.\u001b[0m\u001b[33m\n", + "\u001b[0mFound existing installation: tf_keras 2.19.0\n", + "Uninstalling tf_keras-2.19.0:\n", + " Successfully uninstalled tf_keras-2.19.0\n", + "Found existing installation: tf-models-official 2.19.1\n", + "Uninstalling tf-models-official-2.19.1:\n", + " Successfully uninstalled tf-models-official-2.19.1\n", + "Found existing installation: tensorflow-io 0.37.1\n", + "Uninstalling tensorflow-io-0.37.1:\n", + " Successfully uninstalled tensorflow-io-0.37.1\n", + "Collecting tensorflow==2.15.0\n", + " Downloading tensorflow-2.15.0-cp310-cp310-macosx_12_0_arm64.whl.metadata (3.6 kB)\n", + "Collecting tensorflow-macos==2.15.0 (from tensorflow==2.15.0)\n", + " Downloading tensorflow_macos-2.15.0-cp310-cp310-macosx_12_0_arm64.whl.metadata (4.2 kB)\n", + "Requirement already satisfied: absl-py>=1.0.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (1.4.0)\n", + "Requirement already satisfied: astunparse>=1.6.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) 
(1.6.3)\n", + "Requirement already satisfied: flatbuffers>=23.5.26 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (25.9.23)\n", + "Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (0.4.0)\n", + "Requirement already satisfied: google-pasta>=0.1.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (0.2.0)\n", + "Requirement already satisfied: h5py>=2.9.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.15.1)\n", + "Requirement already satisfied: libclang>=13.0.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (18.1.1)\n", + "Collecting ml-dtypes~=0.2.0 (from tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Downloading ml_dtypes-0.2.0-cp310-cp310-macosx_10_9_universal2.whl.metadata (20 kB)\n", + "Requirement already satisfied: numpy<2.0.0,>=1.23.5 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (1.26.4)\n", + "Requirement already satisfied: opt-einsum>=2.3.2 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.4.0)\n", + "Requirement already satisfied: packaging in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (25.0)\n", + "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.20.3)\n", + "Requirement already satisfied: setuptools in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (80.9.0)\n", + "Requirement already satisfied: six>=1.12.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (1.17.0)\n", + "Requirement already satisfied: termcolor>=1.1.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.2.0)\n", + "Requirement already satisfied: typing-extensions>=3.6.6 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (4.15.0)\n", + "Collecting wrapt<1.15,>=1.11.0 (from tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Downloading wrapt-1.14.2-cp310-cp310-macosx_11_0_arm64.whl.metadata (6.5 kB)\n", + "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (0.37.1)\n", + "Requirement already satisfied: grpcio<2.0,>=1.24.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-macos==2.15.0->tensorflow==2.15.0) (1.65.5)\n", + "Collecting tensorboard<2.16,>=2.15 (from tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Downloading tensorboard-2.15.2-py3-none-any.whl.metadata (1.7 kB)\n", + "Collecting tensorflow-estimator<2.16,>=2.15.0 (from tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Downloading tensorflow_estimator-2.15.0-py2.py3-none-any.whl.metadata (1.3 kB)\n", + "Collecting keras<2.16,>=2.15.0 (from tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Downloading 
keras-2.15.0-py3-none-any.whl.metadata (2.4 kB)\n", + "Requirement already satisfied: google-auth<3,>=1.6.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (2.43.0)\n", + "Collecting google-auth-oauthlib<2,>=0.5 (from tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Downloading google_auth_oauthlib-1.2.3-py3-none-any.whl.metadata (3.1 kB)\n", + "Requirement already satisfied: markdown>=2.6.8 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.10)\n", + "Requirement already satisfied: requests<3,>=2.21.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (2.32.5)\n", + "Collecting tensorboard-data-server<0.8.0,>=0.7.0 (from tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Using cached tensorboard_data_server-0.7.2-py3-none-any.whl.metadata (1.1 kB)\n", + "Requirement already satisfied: werkzeug>=1.0.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.1.3)\n", + "Requirement already satisfied: cachetools<7.0,>=2.0.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (6.2.1)\n", + "Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (0.4.2)\n", + "Requirement already satisfied: rsa<5,>=3.1.4 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (4.9.1)\n", + "Collecting google-auth<3,>=1.6.3 (from tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0)\n", + " Downloading google_auth-2.41.1-py2.py3-none-any.whl.metadata (6.6 kB)\n", + "Requirement already satisfied: requests-oauthlib>=0.7.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from google-auth-oauthlib<2,>=0.5->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (2.0.0)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.4.4)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.11)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (2.5.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (2025.10.5)\n", + "Requirement already satisfied: pyasn1>=0.1.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from rsa<5,>=3.1.4->google-auth<3,>=1.6.3->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (0.6.1)\n", + "Requirement already satisfied: wheel<1.0,>=0.23.0 in 
/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from astunparse>=1.6.0->tensorflow-macos==2.15.0->tensorflow==2.15.0) (0.45.1)\n", + "Requirement already satisfied: oauthlib>=3.0.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<2,>=0.5->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.3.1)\n", + "Requirement already satisfied: MarkupSafe>=2.1.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from werkzeug>=1.0.1->tensorboard<2.16,>=2.15->tensorflow-macos==2.15.0->tensorflow==2.15.0) (3.0.3)\n", + "Downloading tensorflow-2.15.0-cp310-cp310-macosx_12_0_arm64.whl (2.1 kB)\n", + "Downloading tensorflow_macos-2.15.0-cp310-cp310-macosx_12_0_arm64.whl (208.8 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m208.8/208.8 MB\u001b[0m \u001b[31m40.5 MB/s\u001b[0m \u001b[33m0:00:05\u001b[0mm0:00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading keras-2.15.0-py3-none-any.whl (1.7 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m15.9 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading ml_dtypes-0.2.0-cp310-cp310-macosx_10_9_universal2.whl (1.2 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m31.2 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading tensorboard-2.15.2-py3-none-any.whl (5.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.5/5.5 MB\u001b[0m \u001b[31m23.1 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m eta \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading google_auth_oauthlib-1.2.3-py3-none-any.whl (19 kB)\n", + "Downloading google_auth-2.41.1-py2.py3-none-any.whl (221 kB)\n", + "Using cached tensorboard_data_server-0.7.2-py3-none-any.whl (2.4 kB)\n", + "Downloading tensorflow_estimator-2.15.0-py2.py3-none-any.whl (441 kB)\n", + "Downloading wrapt-1.14.2-cp310-cp310-macosx_11_0_arm64.whl (35 kB)\n", + "Installing collected packages: wrapt, tensorflow-estimator, tensorboard-data-server, ml-dtypes, keras, google-auth, google-auth-oauthlib, tensorboard, tensorflow-macos, tensorflow\n", + "\u001b[2K Attempting uninstall: wrapt\n", + "\u001b[2K Found existing installation: wrapt 2.0.1\n", + "\u001b[2K Uninstalling wrapt-2.0.1:\n", + "\u001b[2K Successfully uninstalled wrapt-2.0.1\n", + "\u001b[2K Attempting uninstall: tensorflow-estimator\n", + "\u001b[2K Found existing installation: tensorflow-estimator 2.10.0\n", + "\u001b[2K Uninstalling tensorflow-estimator-2.10.0:\n", + "\u001b[2K Successfully uninstalled tensorflow-estimator-2.10.0\n", + "\u001b[2K Attempting uninstall: tensorboard-data-server━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Found existing installation: tensorboard-data-server 0.6.1\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Uninstalling tensorboard-data-server-0.6.1:━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Successfully uninstalled tensorboard-data-server-0.6.1━━\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Attempting uninstall: ml-dtypes━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Found existing installation: ml_dtypes 0.5.3━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/10\u001b[0m 
[tensorflow-estimator]\n", + "\u001b[2K Uninstalling ml_dtypes-0.5.3:━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Successfully uninstalled ml_dtypes-0.5.3━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Attempting uninstall: keras━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/10\u001b[0m [tensorflow-estimator]\n", + "\u001b[2K Found existing installation: keras 2.10.0━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/10\u001b[0m [keras]-estimator]\n", + "\u001b[2K Uninstalling keras-2.10.0:\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/10\u001b[0m [keras]\n", + "\u001b[2K Successfully uninstalled keras-2.10.0━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/10\u001b[0m [keras]\n", + "\u001b[2K Attempting uninstall: google-auth[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/10\u001b[0m [keras]\n", + "\u001b[2K Found existing installation: google-auth 2.43.0━━━━━━━━━━━\u001b[0m \u001b[32m 4/10\u001b[0m [keras]\n", + "\u001b[2K Uninstalling google-auth-2.43.0:90m━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/10\u001b[0m [keras]\n", + "\u001b[2K Successfully uninstalled google-auth-2.43.0━━━━━━━━━━━━━\u001b[0m \u001b[32m 4/10\u001b[0m [keras]\n", + "\u001b[2K Attempting uninstall: google-auth-oauthlib90m━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 5/10\u001b[0m [google-auth]\n", + "\u001b[2K Found existing installation: google-auth-oauthlib 0.4.6━━━\u001b[0m \u001b[32m 5/10\u001b[0m [google-auth]\n", + "\u001b[2K Uninstalling google-auth-oauthlib-0.4.6:━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 5/10\u001b[0m [google-auth]\n", + "\u001b[2K Successfully uninstalled google-auth-oauthlib-0.4.6━━━━━\u001b[0m \u001b[32m 5/10\u001b[0m [google-auth]\n", + "\u001b[2K Attempting uninstall: tensorboard[0m\u001b[90m━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 5/10\u001b[0m [google-auth]\n", + "\u001b[2K Found existing installation: tensorboard 2.10.1━━━━━━━━━━━\u001b[0m \u001b[32m 5/10\u001b[0m [google-auth]\n", + "\u001b[2K Uninstalling tensorboard-2.10.1:0m\u001b[90m━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 5/10\u001b[0m [google-auth]\n", + "\u001b[2K Successfully uninstalled tensorboard-2.10.1m\u001b[90m━━━━━━━━━━━\u001b[0m \u001b[32m 7/10\u001b[0m [tensorboard]\n", + "\u001b[2K Attempting uninstall: tensorflow-macos\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━\u001b[0m \u001b[32m 7/10\u001b[0m [tensorboard]\n", + "\u001b[2K Found existing installation: tensorflow-macos 2.10.0━━━━━━\u001b[0m \u001b[32m 7/10\u001b[0m [tensorboard]\n", + "\u001b[2K Uninstalling tensorflow-macos-2.10.0:[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━\u001b[0m \u001b[32m 8/10\u001b[0m [tensorflow-macos]\n", + "\u001b[2K Successfully uninstalled tensorflow-macos-2.10.0\u001b[90m━━━━━━━\u001b[0m \u001b[32m 8/10\u001b[0m [tensorflow-macos]\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10/10\u001b[0m [tensorflow]0\u001b[0m [tensorflow-macos]\n", + "\u001b[1A\u001b[2K\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", + "object-detection 0.1 requires tensorflow_io, which is not installed.\n", + "object-detection 0.1 requires tf-models-official>=2.5.1, which is not installed.\u001b[0m\u001b[31m\n", + "\u001b[0mSuccessfully installed google-auth-2.41.1 google-auth-oauthlib-1.2.3 keras-2.15.0 ml-dtypes-0.2.0 tensorboard-2.15.2 tensorboard-data-server-0.7.2 tensorflow-2.15.0 tensorflow-estimator-2.15.0 tensorflow-macos-2.15.0 wrapt-1.14.2\n", + "Collecting tensorflow-io==0.37.1\n", + " Using cached tensorflow_io-0.37.1-cp310-cp310-macosx_12_0_arm64.whl.metadata (14 kB)\n", + "Requirement already satisfied: tensorflow-io-gcs-filesystem==0.37.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-io==0.37.1) (0.37.1)\n", + "Using cached tensorflow_io-0.37.1-cp310-cp310-macosx_12_0_arm64.whl (31.8 MB)\n", + "Installing collected packages: tensorflow-io\n", + "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", + "object-detection 0.1 requires tf-models-official>=2.5.1, which is not installed.\u001b[0m\u001b[31m\n", + "\u001b[0mSuccessfully installed tensorflow-io-0.37.1\n", + "Collecting tf-models-official==2.15.0\n", + " Downloading tf_models_official-2.15.0-py2.py3-none-any.whl.metadata (1.4 kB)\n", + "Requirement already satisfied: Cython in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (3.2.0)\n", + "Requirement already satisfied: Pillow in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (12.0.0)\n", + "Requirement already satisfied: gin-config in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (0.5.0)\n", + "Requirement already satisfied: google-api-python-client>=1.6.7 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (2.187.0)\n", + "Requirement already satisfied: immutabledict in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (4.2.2)\n", + "Requirement already satisfied: kaggle>=1.3.9 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (1.7.4.5)\n", + "Requirement already satisfied: matplotlib in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (3.10.6)\n", + "Requirement already satisfied: numpy>=1.20 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (1.26.4)\n", + "Requirement already satisfied: oauth2client in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (4.1.3)\n", + "Requirement already satisfied: opencv-python-headless in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (4.11.0.86)\n", + "Requirement already satisfied: pandas>=0.22.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (2.3.3)\n", + "Requirement already satisfied: psutil>=5.4.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (7.1.3)\n", + "Requirement already satisfied: py-cpuinfo>=3.3.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (9.0.0)\n", + "Requirement already satisfied: pycocotools in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from 
tf-models-official==2.15.0) (2.0.10)\n", + "Requirement already satisfied: pyyaml>=6.0.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (6.0.3)\n", + "Requirement already satisfied: sacrebleu in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (2.2.0)\n", + "Requirement already satisfied: scipy>=0.19.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (1.15.3)\n", + "Requirement already satisfied: sentencepiece in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (0.2.1)\n", + "Requirement already satisfied: seqeval in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (1.2.2)\n", + "Requirement already satisfied: six in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (1.17.0)\n", + "Requirement already satisfied: tensorflow-datasets in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tf-models-official==2.15.0) (4.9.9)\n", + "Collecting tensorflow-hub>=0.6.0 (from tf-models-official==2.15.0)\n", + " Using cached tensorflow_hub-0.16.1-py2.py3-none-any.whl.metadata (1.3 kB)\n", + "Collecting tensorflow-model-optimization>=0.4.1 (from tf-models-official==2.15.0)\n", + " Using cached tensorflow_model_optimization-0.8.0-py2.py3-none-any.whl.metadata (904 bytes)\n", + "INFO: pip is looking at multiple versions of tf-models-official to determine which version is compatible with other requirements. This could take a while.\n", + "\u001b[31mERROR: Could not find a version that satisfies the requirement tensorflow-text~=2.15.0 (from tf-models-official) (from versions: 2.17.0rc0, 2.17.0, 2.18.0rc0, 2.18.0, 2.18.1, 2.19.0rc0, 2.19.0)\u001b[0m\u001b[31m\n", + "\u001b[0m\u001b[31mERROR: No matching distribution found for tensorflow-text~=2.15.0\u001b[0m\u001b[31m\n", + "\u001b[0m" + ] + } + ], + "source": [ + "!pip uninstall -y tensorflow tf-keras tf-models-official tensorflow-io\n", + "!pip install tensorflow==2.15.0\n", + "!pip install tensorflow-io==0.37.1\n", + "!pip install tf-models-official==2.15.0\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "58450985-7f87-49ad-b245-592d0e2f6bd4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting tensorflow-text==2.18.0\n", + " Downloading tensorflow_text-2.18.0-cp310-cp310-macosx_11_0_arm64.whl.metadata (1.8 kB)\n", + "Collecting tensorflow<2.19,>=2.18.0 (from tensorflow-text==2.18.0)\n", + " Downloading tensorflow-2.18.1-cp310-cp310-macosx_12_0_arm64.whl.metadata (4.0 kB)\n", + "Requirement already satisfied: absl-py>=1.0.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (1.4.0)\n", + "Requirement already satisfied: astunparse>=1.6.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (1.6.3)\n", + "Requirement already satisfied: flatbuffers>=24.3.25 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (25.9.23)\n", + "Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.4.0)\n", + "Requirement already satisfied: google-pasta>=0.1.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from 
tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.2.0)\n", + "Requirement already satisfied: libclang>=13.0.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (18.1.1)\n", + "Requirement already satisfied: opt-einsum>=2.3.2 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.4.0)\n", + "Requirement already satisfied: packaging in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (25.0)\n", + "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev,>=3.20.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.20.3)\n", + "Requirement already satisfied: requests<3,>=2.21.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (2.32.5)\n", + "Requirement already satisfied: setuptools in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (80.9.0)\n", + "Requirement already satisfied: six>=1.12.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (1.17.0)\n", + "Requirement already satisfied: termcolor>=1.1.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.2.0)\n", + "Requirement already satisfied: typing-extensions>=3.6.6 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (4.15.0)\n", + "Requirement already satisfied: wrapt>=1.11.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (1.14.2)\n", + "Requirement already satisfied: grpcio<2.0,>=1.24.3 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (1.65.5)\n", + "Collecting tensorboard<2.19,>=2.18 (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0)\n", + " Downloading tensorboard-2.18.0-py3-none-any.whl.metadata (1.6 kB)\n", + "Collecting keras>=3.5.0 (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0)\n", + " Using cached keras-3.12.0-py3-none-any.whl.metadata (5.9 kB)\n", + "Requirement already satisfied: numpy<2.1.0,>=1.26.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (1.26.4)\n", + "Requirement already satisfied: h5py>=3.11.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.15.1)\n", + "Collecting ml-dtypes<1.0.0,>=0.4.0 (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0)\n", + " Using cached ml_dtypes-0.5.3-cp310-cp310-macosx_10_9_universal2.whl.metadata (8.9 kB)\n", + "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.37.1)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.4.4)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from 
requests<3,>=2.21.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.11)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (2.5.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (2025.10.5)\n", + "Requirement already satisfied: markdown>=2.6.8 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.10)\n", + "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.7.2)\n", + "Requirement already satisfied: werkzeug>=1.0.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.1.3)\n", + "Requirement already satisfied: wheel<1.0,>=0.23.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from astunparse>=1.6.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.45.1)\n", + "Requirement already satisfied: rich in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from keras>=3.5.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (14.2.0)\n", + "Requirement already satisfied: namex in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from keras>=3.5.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.1.0)\n", + "Requirement already satisfied: optree in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from keras>=3.5.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.17.0)\n", + "Requirement already satisfied: MarkupSafe>=2.1.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from werkzeug>=1.0.1->tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (3.0.3)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from rich->keras>=3.5.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (4.0.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from rich->keras>=3.5.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (2.19.2)\n", + "Requirement already satisfied: mdurl~=0.1 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from markdown-it-py>=2.2.0->rich->keras>=3.5.0->tensorflow<2.19,>=2.18.0->tensorflow-text==2.18.0) (0.1.2)\n", + "Downloading tensorflow_text-2.18.0-cp310-cp310-macosx_11_0_arm64.whl (6.1 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.1/6.1 MB\u001b[0m \u001b[31m16.0 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0mm0:00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading tensorflow-2.18.1-cp310-cp310-macosx_12_0_arm64.whl (239.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m239.4/239.4 MB\u001b[0m \u001b[31m20.2 MB/s\u001b[0m \u001b[33m0:00:11\u001b[0mm0:00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hUsing cached ml_dtypes-0.5.3-cp310-cp310-macosx_10_9_universal2.whl (667 kB)\n", + "Downloading tensorboard-2.18.0-py3-none-any.whl (5.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.5/5.5 MB\u001b[0m \u001b[31m16.8 
MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m eta \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hUsing cached keras-3.12.0-py3-none-any.whl (1.5 MB)\n", + "Installing collected packages: ml-dtypes, tensorboard, keras, tensorflow, tensorflow-text\n", + "\u001b[2K Attempting uninstall: ml-dtypes\n", + "\u001b[2K Found existing installation: ml-dtypes 0.2.0\n", + "\u001b[2K Uninstalling ml-dtypes-0.2.0:\n", + "\u001b[2K Successfully uninstalled ml-dtypes-0.2.0\n", + "\u001b[2K Attempting uninstall: tensorboard\n", + "\u001b[2K Found existing installation: tensorboard 2.15.2\n", + "\u001b[2K Uninstalling tensorboard-2.15.2:\n", + "\u001b[2K Successfully uninstalled tensorboard-2.15.2\n", + "\u001b[2K Attempting uninstall: kerasm\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/5\u001b[0m [tensorboard]\n", + "\u001b[2K Found existing installation: keras 2.15.0━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/5\u001b[0m [tensorboard]\n", + "\u001b[2K Uninstalling keras-2.15.0:m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/5\u001b[0m [tensorboard]\n", + "\u001b[2K Successfully uninstalled keras-2.15.0━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/5\u001b[0m [tensorboard]\n", + "\u001b[2K Attempting uninstall: tensorflow\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2/5\u001b[0m [keras]d]\n", + "\u001b[2K Found existing installation: tensorflow 2.15.0━━━━━━━━━━━━\u001b[0m \u001b[32m2/5\u001b[0m [keras]\n", + "\u001b[2K Uninstalling tensorflow-2.15.0:[90m━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2/5\u001b[0m [keras]\n", + "\u001b[2K Successfully uninstalled tensorflow-2.15.0━━━━━━━━━━━━━━\u001b[0m \u001b[32m2/5\u001b[0m [keras]\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5/5\u001b[0m [tensorflow-text] [tensorflow-text]\n", + "\u001b[1A\u001b[2K\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", + "object-detection 0.1 requires tf-models-official>=2.5.1, which is not installed.\n", + "tensorflow-macos 2.15.0 requires keras<2.16,>=2.15.0, but you have keras 3.12.0 which is incompatible.\n", + "tensorflow-macos 2.15.0 requires ml-dtypes~=0.2.0, but you have ml-dtypes 0.5.3 which is incompatible.\n", + "tensorflow-macos 2.15.0 requires tensorboard<2.16,>=2.15, but you have tensorboard 2.18.0 which is incompatible.\u001b[0m\u001b[31m\n", + "\u001b[0mSuccessfully installed keras-3.12.0 ml-dtypes-0.5.3 tensorboard-2.18.0 tensorflow-2.18.1 tensorflow-text-2.18.0\n", + "Collecting tf-models-official==2.15.0\n", + " Using cached tf_models_official-2.15.0-py2.py3-none-any.whl.metadata (1.4 kB)\n", + "Downloading tf_models_official-2.15.0-py2.py3-none-any.whl (2.7 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.7/2.7 MB\u001b[0m \u001b[31m15.4 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0mm0:00:01\u001b[0m\n", + "\u001b[?25hInstalling collected packages: tf-models-official\n", + "Successfully installed tf-models-official-2.15.0\n" + ] + } + ], + "source": [ + "!pip install tensorflow-text==2.18.0\n", + "!pip install tf-models-official==2.15.0 --no-deps\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "092e3c6c-bdd8-4e8e-82c8-6a9b0a0bfa2c", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/google/api_core/_python_version_support.py:266: FutureWarning: You are using a Python version (3.10.19) which Google will stop supporting in new releases of google.api_core once it reaches its end of life (2026-10-04). Please upgrade to the latest Python version, or at least Python 3.11, to continue receiving updates for google.api_core past that date.\n", + " warnings.warn(message, FutureWarning)\n" + ] + }, + { + "ename": "NotFoundError", + "evalue": "dlopen(/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow-plugins/libmetal_plugin.dylib, 0x0006): symbol not found in flat namespace '__ZN10tensorflow8internal10LogMessage16VmoduleActivatedEPKci'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mas\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtf\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtensorflow_io\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mas\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtfio\u001b[39;00m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtensorflow_text\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mas\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtext\u001b[39;00m\n", + "File \u001b[0;32m/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow/__init__.py:437\u001b[0m\n\u001b[1;32m 435\u001b[0m _plugin_dir \u001b[38;5;241m=\u001b[39m _os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(_s, 
\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtensorflow-plugins\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 436\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m _os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mexists(_plugin_dir):\n\u001b[0;32m--> 437\u001b[0m \u001b[43m_ll\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_library\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_plugin_dir\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 438\u001b[0m \u001b[38;5;66;03m# Load Pluggable Device Library\u001b[39;00m\n\u001b[1;32m 439\u001b[0m _ll\u001b[38;5;241m.\u001b[39mload_pluggable_device_library(_plugin_dir)\n", + "File \u001b[0;32m/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow/python/framework/load_library.py:151\u001b[0m, in \u001b[0;36mload_library\u001b[0;34m(library_location)\u001b[0m\n\u001b[1;32m 148\u001b[0m kernel_libraries \u001b[38;5;241m=\u001b[39m [library_location]\n\u001b[1;32m 150\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m lib \u001b[38;5;129;01min\u001b[39;00m kernel_libraries:\n\u001b[0;32m--> 151\u001b[0m \u001b[43mpy_tf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mTF_LoadLibrary\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlib\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 153\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 154\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m(\n\u001b[1;32m 155\u001b[0m errno\u001b[38;5;241m.\u001b[39mENOENT,\n\u001b[1;32m 156\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mThe file or folder to load kernel libraries from does not exist.\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 157\u001b[0m library_location)\n", + "\u001b[0;31mNotFoundError\u001b[0m: dlopen(/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow-plugins/libmetal_plugin.dylib, 0x0006): symbol not found in flat namespace '__ZN10tensorflow8internal10LogMessage16VmoduleActivatedEPKci'" + ] + } + ], + "source": [ + "import tensorflow as tf\n", + "import tensorflow_io as tfio\n", + "import tensorflow_text as text\n", + "import tensorflow_hub as hub\n", + "import tf_models_official as tfo\n", + "\n", + "print(\"TensorFlow:\", tf.__version__)\n", + "print(\"TensorFlow I/O:\", tfio.__version__)\n", + "print(\"TensorFlow Text:\", text.__version__)\n", + "print(\"TF Models Official:\", tfo.__version__)\n", + "print(\"GPU Devices:\", tf.config.list_physical_devices('GPU'))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "64da689a-8b3b-4cb8-96ac-729873d73281", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found existing installation: tensorflow-metal 0.6.0\n", + "Uninstalling tensorflow-metal-0.6.0:\n", + " Successfully uninstalled tensorflow-metal-0.6.0\n", + "Collecting tensorflow-metal==1.1.0\n", + " Downloading tensorflow_metal-1.1.0-cp310-cp310-macosx_12_0_arm64.whl.metadata (1.2 kB)\n", + "Requirement already satisfied: wheel~=0.35 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-metal==1.1.0) (0.45.1)\n", + "Requirement already satisfied: six>=1.15.0 in /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages (from tensorflow-metal==1.1.0) (1.17.0)\n", + "Downloading tensorflow_metal-1.1.0-cp310-cp310-macosx_12_0_arm64.whl (1.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.4/1.4 MB\u001b[0m \u001b[31m12.0 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n", + "\u001b[?25hInstalling collected packages: tensorflow-metal\n", + 
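A quick, hedged sanity check (not part of the original notebook) that can be run after swapping plugin wheels and restarting the kernel: it reports whether TensorFlow imports cleanly and whether the Metal PluggableDevice registered a GPU, and otherwise suggests removing tensorflow-metal so TensorFlow falls back to CPU, since the dlopen errors above point at a plugin built against a different TensorFlow release.

def check_metal_plugin():
    # Import inside the function so a broken plugin surfaces as a catchable error
    # rather than killing the whole cell at module-import time.
    try:
        import tensorflow as tf
        gpus = tf.config.list_physical_devices("GPU")
        print("TensorFlow:", tf.__version__)
        print("GPU devices:", gpus if gpus else "none (CPU only)")
    except Exception as exc:
        # e.g. NotFoundError from a libmetal_plugin.dylib that does not match this TF build
        print("TensorFlow failed to load:", exc)
        print("Consider: pip uninstall -y tensorflow-metal, then restart the kernel.")

check_metal_plugin()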
"Successfully installed tensorflow-metal-1.1.0\n" + ] + } + ], + "source": [ + "# 1️⃣ Remove the old, incompatible Metal plugin\n", + "!pip uninstall -y tensorflow-metal\n", + "\n", + "# 2️⃣ Install the matching plugin for TensorFlow 2.18\n", + "!pip install tensorflow-metal==1.1.0\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "64383a21-4a0b-470a-a078-acca4391b3b4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/google/api_core/_python_version_support.py:266: FutureWarning: You are using a Python version (3.10.19) which Google will stop supporting in new releases of google.api_core once it reaches its end of life (2026-10-04). Please upgrade to the latest Python version, or at least Python 3.11, to continue receiving updates for google.api_core past that date.\n", + " warnings.warn(message, FutureWarning)\n" + ] + }, + { + "ename": "NotFoundError", + "evalue": "dlopen(/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow-plugins/libmetal_plugin.dylib, 0x0006): Symbol not found: __ZN3tsl8internal10LogMessageC1EPKcii\n Referenced from: /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow-plugins/libmetal_plugin.dylib\n Expected in: /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mas\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mtf\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTensorFlow:\u001b[39m\u001b[38;5;124m\"\u001b[39m, tf\u001b[38;5;241m.\u001b[39m__version__)\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAvailable GPUs:\u001b[39m\u001b[38;5;124m\"\u001b[39m, tf\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mlist_physical_devices(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mGPU\u001b[39m\u001b[38;5;124m'\u001b[39m))\n", + "File \u001b[0;32m/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow/__init__.py:437\u001b[0m\n\u001b[1;32m 435\u001b[0m _plugin_dir \u001b[38;5;241m=\u001b[39m _os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(_s, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtensorflow-plugins\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 436\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m _os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mexists(_plugin_dir):\n\u001b[0;32m--> 437\u001b[0m \u001b[43m_ll\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_library\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_plugin_dir\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 438\u001b[0m \u001b[38;5;66;03m# Load Pluggable Device Library\u001b[39;00m\n\u001b[1;32m 439\u001b[0m _ll\u001b[38;5;241m.\u001b[39mload_pluggable_device_library(_plugin_dir)\n", + "File \u001b[0;32m/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow/python/framework/load_library.py:151\u001b[0m, in \u001b[0;36mload_library\u001b[0;34m(library_location)\u001b[0m\n\u001b[1;32m 
148\u001b[0m kernel_libraries \u001b[38;5;241m=\u001b[39m [library_location]\n\u001b[1;32m 150\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m lib \u001b[38;5;129;01min\u001b[39;00m kernel_libraries:\n\u001b[0;32m--> 151\u001b[0m \u001b[43mpy_tf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mTF_LoadLibrary\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlib\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 153\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 154\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m(\n\u001b[1;32m 155\u001b[0m errno\u001b[38;5;241m.\u001b[39mENOENT,\n\u001b[1;32m 156\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mThe file or folder to load kernel libraries from does not exist.\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 157\u001b[0m library_location)\n", + "\u001b[0;31mNotFoundError\u001b[0m: dlopen(/opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow-plugins/libmetal_plugin.dylib, 0x0006): Symbol not found: __ZN3tsl8internal10LogMessageC1EPKcii\n Referenced from: /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow-plugins/libmetal_plugin.dylib\n Expected in: /opt/anaconda3/envs/tfod2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so" + ] + } + ], + "source": [ + "import tensorflow as tf\n", + "print(\"TensorFlow:\", tf.__version__)\n", + "print(\"Available GPUs:\", tf.config.list_physical_devices('GPU'))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2101c341-c16b-4a90-8ce2-4c039888a2b9", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (tfod2)", + "language": "python", + "name": "tfod2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.19" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/.ipynb_checkpoints/Tutorial-checkpoint.ipynb b/.ipynb_checkpoints/Tutorial-checkpoint.ipynb new file mode 100644 index 00000000..99fe06fd --- /dev/null +++ b/.ipynb_checkpoints/Tutorial-checkpoint.ipynb @@ -0,0 +1,635 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Resources Used\n", + "- wget.download('https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/_downloads/da4babe668a8afb093cc7776d7e630f3/generate_tfrecord.py')\n", + "- Setup https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/install.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 0. Setup Paths" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "WORKSPACE_PATH = 'Tensorflow/workspace'\n", + "SCRIPTS_PATH = 'Tensorflow/scripts'\n", + "APIMODEL_PATH = 'Tensorflow/models'\n", + "ANNOTATION_PATH = WORKSPACE_PATH+'/annotations'\n", + "IMAGE_PATH = WORKSPACE_PATH+'/images'\n", + "MODEL_PATH = WORKSPACE_PATH+'/models'\n", + "PRETRAINED_MODEL_PATH = WORKSPACE_PATH+'/pre-trained-models'\n", + "CONFIG_PATH = MODEL_PATH+'/my_ssd_mobnet/pipeline.config'\n", + "CHECKPOINT_PATH = MODEL_PATH+'/my_ssd_mobnet/'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 1. 
Create Label Map" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "labels = [{'name':'Mask', 'id':1}, {'name':'NoMask', 'id':2}]\n", + "\n", + "with open(ANNOTATION_PATH + '\\label_map.pbtxt', 'w') as f:\n", + " for label in labels:\n", + " f.write('item { \\n')\n", + " f.write('\\tname:\\'{}\\'\\n'.format(label['name']))\n", + " f.write('\\tid:{}\\n'.format(label['id']))\n", + " f.write('}\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 2. Create TF records" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Successfully created the TFRecord file: Tensorflow/workspace/annotations/train.record\n", + "Successfully created the TFRecord file: Tensorflow/workspace/annotations/test.record\n" + ] + } + ], + "source": [ + "!python {SCRIPTS_PATH + '/generate_tfrecord.py'} -x {IMAGE_PATH + '/train'} -l {ANNOTATION_PATH + '/label_map.pbtxt'} -o {ANNOTATION_PATH + '/train.record'}\n", + "!python {SCRIPTS_PATH + '/generate_tfrecord.py'} -x{IMAGE_PATH + '/test'} -l {ANNOTATION_PATH + '/label_map.pbtxt'} -o {ANNOTATION_PATH + '/test.record'}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3. Download TF Models Pretrained Models from Tensorflow Model Zoo" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Cloning into 'models'...\n" + ] + } + ], + "source": [ + "!cd Tensorflow && git clone https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "#wget.download('http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz')\n", + "#!mv ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz {PRETRAINED_MODEL_PATH}\n", + "#!cd {PRETRAINED_MODEL_PATH} && tar -zxvf ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 4. Copy Model Config to Training Folder" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "CUSTOM_MODEL_NAME = 'my_ssd_mobnet' " + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "A subdirectory or file Tensorflow\\workspace\\models\\my_ssd_mobnet already exists.\n" + ] + } + ], + "source": [ + "!mkdir {'Tensorflow\\workspace\\models\\\\'+CUSTOM_MODEL_NAME}\n", + "!cp {PRETRAINED_MODEL_PATH+'/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/pipeline.config'} {MODEL_PATH+'/'+CUSTOM_MODEL_NAME}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 5. 
Update Config For Transfer Learning" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow as tf\n", + "from object_detection.utils import config_util\n", + "from object_detection.protos import pipeline_pb2\n", + "from google.protobuf import text_format" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "CONFIG_PATH = MODEL_PATH+'/'+CUSTOM_MODEL_NAME+'/pipeline.config'" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [], + "source": [ + "config = config_util.get_configs_from_pipeline_file(CONFIG_PATH)" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": { + "collapsed": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'model': ssd {\n", + " num_classes: 2\n", + " image_resizer {\n", + " fixed_shape_resizer {\n", + " height: 320\n", + " width: 320\n", + " }\n", + " }\n", + " feature_extractor {\n", + " type: \"ssd_mobilenet_v2_fpn_keras\"\n", + " depth_multiplier: 1.0\n", + " min_depth: 16\n", + " conv_hyperparams {\n", + " regularizer {\n", + " l2_regularizer {\n", + " weight: 4e-05\n", + " }\n", + " }\n", + " initializer {\n", + " random_normal_initializer {\n", + " mean: 0.0\n", + " stddev: 0.01\n", + " }\n", + " }\n", + " activation: RELU_6\n", + " batch_norm {\n", + " decay: 0.997\n", + " scale: true\n", + " epsilon: 0.001\n", + " }\n", + " }\n", + " use_depthwise: true\n", + " override_base_feature_extractor_hyperparams: true\n", + " fpn {\n", + " min_level: 3\n", + " max_level: 7\n", + " additional_layer_depth: 128\n", + " }\n", + " }\n", + " box_coder {\n", + " faster_rcnn_box_coder {\n", + " y_scale: 10.0\n", + " x_scale: 10.0\n", + " height_scale: 5.0\n", + " width_scale: 5.0\n", + " }\n", + " }\n", + " matcher {\n", + " argmax_matcher {\n", + " matched_threshold: 0.5\n", + " unmatched_threshold: 0.5\n", + " ignore_thresholds: false\n", + " negatives_lower_than_unmatched: true\n", + " force_match_for_each_row: true\n", + " use_matmul_gather: true\n", + " }\n", + " }\n", + " similarity_calculator {\n", + " iou_similarity {\n", + " }\n", + " }\n", + " box_predictor {\n", + " weight_shared_convolutional_box_predictor {\n", + " conv_hyperparams {\n", + " regularizer {\n", + " l2_regularizer {\n", + " weight: 4e-05\n", + " }\n", + " }\n", + " initializer {\n", + " random_normal_initializer {\n", + " mean: 0.0\n", + " stddev: 0.01\n", + " }\n", + " }\n", + " activation: RELU_6\n", + " batch_norm {\n", + " decay: 0.997\n", + " scale: true\n", + " epsilon: 0.001\n", + " }\n", + " }\n", + " depth: 128\n", + " num_layers_before_predictor: 4\n", + " kernel_size: 3\n", + " class_prediction_bias_init: -4.6\n", + " share_prediction_tower: true\n", + " use_depthwise: true\n", + " }\n", + " }\n", + " anchor_generator {\n", + " multiscale_anchor_generator {\n", + " min_level: 3\n", + " max_level: 7\n", + " anchor_scale: 4.0\n", + " aspect_ratios: 1.0\n", + " aspect_ratios: 2.0\n", + " aspect_ratios: 0.5\n", + " scales_per_octave: 2\n", + " }\n", + " }\n", + " post_processing {\n", + " batch_non_max_suppression {\n", + " score_threshold: 1e-08\n", + " iou_threshold: 0.6\n", + " max_detections_per_class: 100\n", + " max_total_detections: 100\n", + " use_static_shapes: false\n", + " }\n", + " score_converter: SIGMOID\n", + " }\n", + " normalize_loss_by_num_matches: true\n", + " loss {\n", + " localization_loss {\n", + " weighted_smooth_l1 {\n", + " }\n", + " }\n", + " 
classification_loss {\n", + " weighted_sigmoid_focal {\n", + " gamma: 2.0\n", + " alpha: 0.25\n", + " }\n", + " }\n", + " classification_weight: 1.0\n", + " localization_weight: 1.0\n", + " }\n", + " encode_background_as_zeros: true\n", + " normalize_loc_loss_by_codesize: true\n", + " inplace_batchnorm_update: true\n", + " freeze_batchnorm: false\n", + " }, 'train_config': batch_size: 4\n", + " data_augmentation_options {\n", + " random_horizontal_flip {\n", + " }\n", + " }\n", + " data_augmentation_options {\n", + " random_crop_image {\n", + " min_object_covered: 0.0\n", + " min_aspect_ratio: 0.75\n", + " max_aspect_ratio: 3.0\n", + " min_area: 0.75\n", + " max_area: 1.0\n", + " overlap_thresh: 0.0\n", + " }\n", + " }\n", + " sync_replicas: true\n", + " optimizer {\n", + " momentum_optimizer {\n", + " learning_rate {\n", + " cosine_decay_learning_rate {\n", + " learning_rate_base: 0.08\n", + " total_steps: 50000\n", + " warmup_learning_rate: 0.026666\n", + " warmup_steps: 1000\n", + " }\n", + " }\n", + " momentum_optimizer_value: 0.9\n", + " }\n", + " use_moving_average: false\n", + " }\n", + " fine_tune_checkpoint: \"Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/ckpt-0\"\n", + " num_steps: 50000\n", + " startup_delay_steps: 0.0\n", + " replicas_to_aggregate: 8\n", + " max_number_of_boxes: 100\n", + " unpad_groundtruth_tensors: false\n", + " fine_tune_checkpoint_type: \"detection\"\n", + " fine_tune_checkpoint_version: V2, 'train_input_config': label_map_path: \"Tensorflow/workspace/annotations/label_map.pbtxt\"\n", + " tf_record_input_reader {\n", + " input_path: \"Tensorflow/workspace/annotations/train.record\"\n", + " }, 'eval_config': metrics_set: \"coco_detection_metrics\"\n", + " use_moving_averages: false, 'eval_input_configs': [label_map_path: \"Tensorflow/workspace/annotations/label_map.pbtxt\"\n", + " shuffle: false\n", + " num_epochs: 1\n", + " tf_record_input_reader {\n", + " input_path: \"Tensorflow/workspace/annotations/test.record\"\n", + " }\n", + " ], 'eval_input_config': label_map_path: \"Tensorflow/workspace/annotations/label_map.pbtxt\"\n", + " shuffle: false\n", + " num_epochs: 1\n", + " tf_record_input_reader {\n", + " input_path: \"Tensorflow/workspace/annotations/test.record\"\n", + " }}" + ] + }, + "execution_count": 56, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "config" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n", + "with tf.io.gfile.GFile(CONFIG_PATH, \"r\") as f: \n", + " proto_str = f.read() \n", + " text_format.Merge(proto_str, pipeline_config) " + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [], + "source": [ + "pipeline_config.model.ssd.num_classes = 2\n", + "pipeline_config.train_config.batch_size = 4\n", + "pipeline_config.train_config.fine_tune_checkpoint = PRETRAINED_MODEL_PATH+'/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/ckpt-0'\n", + "pipeline_config.train_config.fine_tune_checkpoint_type = \"detection\"\n", + "pipeline_config.train_input_reader.label_map_path= ANNOTATION_PATH + '/label_map.pbtxt'\n", + "pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/train.record']\n", + "pipeline_config.eval_input_reader[0].label_map_path = ANNOTATION_PATH + '/label_map.pbtxt'\n", + 
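# Hedged alternative (not in the original cell): derive num_classes from the label
# map instead of hard-coding it, so pipeline.config cannot drift from label_map.pbtxt.
# (The label map committed under annotations/ in this change lists five gesture
# classes, while this Mask/NoMask checkpoint hard-codes 2.)
from object_detection.utils import label_map_util

def num_classes_from_label_map(label_map_path):
    # get_label_map_dict maps class names to ids; its length is the class count.
    return len(label_map_util.get_label_map_dict(label_map_path))

# Example usage with the notebook's own path convention:
# pipeline_config.model.ssd.num_classes = num_classes_from_label_map(
#     ANNOTATION_PATH + '/label_map.pbtxt')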
"pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/test.record']" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [], + "source": [ + "config_text = text_format.MessageToString(pipeline_config) \n", + "with tf.io.gfile.GFile(CONFIG_PATH, \"wb\") as f: \n", + " f.write(config_text) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 6. Train the model" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "python Tensorflow/models/research/object_detection/model_main_tf2.py --model_dir=Tensorflow/workspace/models/my_ssd_mobnet --pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config --num_train_steps=5000\n" + ] + } + ], + "source": [ + "print(\"\"\"python {}/research/object_detection/model_main_tf2.py --model_dir={}/{} --pipeline_config_path={}/{}/pipeline.config --num_train_steps=5000\"\"\".format(APIMODEL_PATH, MODEL_PATH,CUSTOM_MODEL_NAME,MODEL_PATH,CUSTOM_MODEL_NAME))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 7. Load Train Model From Checkpoint" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import visualization_utils as viz_utils\n", + "from object_detection.builders import model_builder" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# Load pipeline config and build a detection model\n", + "configs = config_util.get_configs_from_pipeline_file(CONFIG_PATH)\n", + "detection_model = model_builder.build(model_config=configs['model'], is_training=False)\n", + "\n", + "# Restore checkpoint\n", + "ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\n", + "ckpt.restore(os.path.join(CHECKPOINT_PATH, 'ckpt-6')).expect_partial()\n", + "\n", + "@tf.function\n", + "def detect_fn(image):\n", + " image, shapes = detection_model.preprocess(image)\n", + " prediction_dict = detection_model.predict(image, shapes)\n", + " detections = detection_model.postprocess(prediction_dict, shapes)\n", + " return detections" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 8. 
Detect in Real-Time" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2 \n", + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "category_index = label_map_util.create_category_index_from_labelmap(ANNOTATION_PATH+'/label_map.pbtxt')" + ] + }, + { + "cell_type": "code", + "execution_count": 105, + "metadata": {}, + "outputs": [], + "source": [ + "cap.release()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "# Setup capture\n", + "cap = cv2.VideoCapture(0)\n", + "width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n", + "height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "while True: \n", + " ret, frame = cap.read()\n", + " image_np = np.array(frame)\n", + " \n", + " input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n", + " detections = detect_fn(input_tensor)\n", + " \n", + " num_detections = int(detections.pop('num_detections'))\n", + " detections = {key: value[0, :num_detections].numpy()\n", + " for key, value in detections.items()}\n", + " detections['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n", + "\n", + " label_id_offset = 1\n", + " image_np_with_detections = image_np.copy()\n", + "\n", + " viz_utils.visualize_boxes_and_labels_on_image_array(\n", + " image_np_with_detections,\n", + " detections['detection_boxes'],\n", + " detections['detection_classes']+label_id_offset,\n", + " detections['detection_scores'],\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " max_boxes_to_draw=5,\n", + " min_score_thresh=.5,\n", + " agnostic_mode=False)\n", + "\n", + " cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))\n", + " \n", + " if cv2.waitKey(1) & 0xFF == ord('q'):\n", + " cap.release()\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [], + "source": [ + "detections = detect_fn(input_tensor)" + ] + }, + { + "cell_type": "code", + "execution_count": 67, + "metadata": {}, + "outputs": [], + "source": [ + "from matplotlib import pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/.ipynb_checkpoints/Untitled1-checkpoint.ipynb b/.ipynb_checkpoints/Untitled1-checkpoint.ipynb new file mode 100644 index 00000000..2fd64429 --- /dev/null +++ b/.ipynb_checkpoints/Untitled1-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..ed5b5f68 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,57 @@ +# ==== Base image: TF 2.5.1 with Jupyter (works on M1 with emulation) ==== +FROM 
--platform=linux/amd64 tensorflow/tensorflow:2.5.1-jupyter + +# Avoid interactive prompts +ENV DEBIAN_FRONTEND=noninteractive + +# ==== System packages needed for Object Detection API ==== +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + git \ + protobuf-compiler \ + python3-dev \ + build-essential \ + wget \ + curl \ + libglib2.0-0 \ + libsm6 \ + libxrender1 \ + libxext6 && \ + rm -rf /var/lib/apt/lists/* + +# ==== Python dependencies ==== +RUN pip install --no-cache-dir \ + pillow \ + lxml \ + Cython \ + contextlib2 \ + matplotlib \ + pandas \ + opencv-python-headless \ + pycocotools \ + tf_slim \ + protobuf==3.19.4 + +# ==== Clone TF Models Repo (working OD API version) ==== +RUN git clone https://github.com/tensorflow/models /models && \ + cd /models && git checkout 8d9ce6a58e5ef25e7bae647fdfb77b4b5c5d42e1 + +# ==== Build & Install Object Detection API ==== +WORKDIR /models/research + +# Compile protobuf files +RUN protoc object_detection/protos/*.proto --python_out=. + +# Install COCO API +RUN git clone https://github.com/cocodataset/cocoapi.git && \ + cd cocoapi/PythonAPI && \ + python3 setup.py build_ext --inplace && \ + python3 setup.py install + +# Install Object Detection API as a package +RUN cp object_detection/packages/tf2/setup.py . && \ + python3 -m pip install --no-cache-dir . + +# Back to notebook working directory +WORKDIR /tf + diff --git a/Tensorflow/scripts/generate_tfrecord.py b/Tensorflow/scripts/generate_tfrecord.py index caad456d..52a8a5f6 100644 --- a/Tensorflow/scripts/generate_tfrecord.py +++ b/Tensorflow/scripts/generate_tfrecord.py @@ -1,114 +1,40 @@ -""" Sample TensorFlow XML-to-TFRecord converter - -usage: generate_tfrecord.py [-h] [-x XML_DIR] [-l LABELS_PATH] [-o OUTPUT_PATH] [-i IMAGE_DIR] [-c CSV_PATH] +""" +Usage: + # Create train record: + python generate_tfrecord.py --csv_input=workspace/annotations/train_labels.csv --image_dir=workspace/images/train --output_path=workspace/annotations/train.record --label_map=workspace/annotations/label_map.pbtxt -optional arguments: - -h, --help show this help message and exit - -x XML_DIR, --xml_dir XML_DIR - Path to the folder where the input .xml files are stored. - -l LABELS_PATH, --labels_path LABELS_PATH - Path to the labels (.pbtxt) file. - -o OUTPUT_PATH, --output_path OUTPUT_PATH - Path of output TFRecord (.record) file. - -i IMAGE_DIR, --image_dir IMAGE_DIR - Path to the folder where the input image files are stored. Defaults to the same directory as XML_DIR. - -c CSV_PATH, --csv_path CSV_PATH - Path of output .csv file. If none provided, then no file will be written. 
+ # Create test record: + python generate_tfrecord.py --csv_input=workspace/annotations/test_labels.csv --image_dir=workspace/images/test --output_path=workspace/annotations/test.record --label_map=workspace/annotations/label_map.pbtxt """ import os -import glob -import pandas as pd import io -import xml.etree.ElementTree as ET +import pandas as pd +import tensorflow as tf +tf.gfile =tf.io.gfile import argparse - -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) -import tensorflow.compat.v1 as tf from PIL import Image from object_detection.utils import dataset_util, label_map_util from collections import namedtuple -# Initiate argument parser -parser = argparse.ArgumentParser( - description="Sample TensorFlow XML-to-TFRecord converter") -parser.add_argument("-x", - "--xml_dir", - help="Path to the folder where the input .xml files are stored.", - type=str) -parser.add_argument("-l", - "--labels_path", - help="Path to the labels (.pbtxt) file.", type=str) -parser.add_argument("-o", - "--output_path", - help="Path of output TFRecord (.record) file.", type=str) -parser.add_argument("-i", - "--image_dir", - help="Path to the folder where the input image files are stored. " - "Defaults to the same directory as XML_DIR.", - type=str, default=None) -parser.add_argument("-c", - "--csv_path", - help="Path of output .csv file. If none provided, then no file will be " - "written.", - type=str, default=None) - +# Define command-line arguments +parser = argparse.ArgumentParser(description='Generate TFRecord from CSV.') +parser.add_argument('--csv_input', type=str, required=True, help='Path to the CSV input') +parser.add_argument('--image_dir', type=str, required=True, help='Path to the image directory') +parser.add_argument('--output_path', type=str, required=True, help='Path to output TFRecord') +parser.add_argument('--label_map', type=str, required=True, help='Path to label map file') args = parser.parse_args() -if args.image_dir is None: - args.image_dir = args.xml_dir - -label_map = label_map_util.load_labelmap(args.labels_path) -label_map_dict = label_map_util.get_label_map_dict(label_map) - - -def xml_to_csv(path): - """Iterates through all .xml files (generated by labelImg) in a given directory and combines - them in a single Pandas dataframe. 
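# Hedged sketch (an assumption, not part of this change): the rewritten script now
# consumes CSV annotations, so the labelImg XML files need a one-off conversion that
# mirrors the xml_to_csv helper removed from this script. Column order matches the
# committed train_labels.csv / test_labels.csv files.
import glob
import os
import xml.etree.ElementTree as ET

import pandas as pd

def xml_dir_to_csv(xml_dir, csv_path):
    rows = []
    for xml_file in glob.glob(os.path.join(xml_dir, '*.xml')):
        root = ET.parse(xml_file).getroot()
        filename = root.find('filename').text
        width = int(root.find('size/width').text)
        height = int(root.find('size/height').text)
        for obj in root.findall('object'):
            box = obj.find('bndbox')
            rows.append((filename, width, height, obj.find('name').text,
                         int(box.find('xmin').text), int(box.find('ymin').text),
                         int(box.find('xmax').text), int(box.find('ymax').text)))
    columns = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    pd.DataFrame(rows, columns=columns).to_csv(csv_path, index=False)

# e.g. xml_dir_to_csv('Tensorflow/workspace/images/train',
#                     'Tensorflow/workspace/annotations/train_labels.csv')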
- - Parameters: - ---------- - path : str - The path containing the .xml files - Returns - ------- - Pandas DataFrame - The produced dataframe - """ - - xml_list = [] - for xml_file in glob.glob(path + '/*.xml'): - tree = ET.parse(xml_file) - root = tree.getroot() - for member in root.findall('object'): - value = (root.find('filename').text, - int(root.find('size')[0].text), - int(root.find('size')[1].text), - member[0].text, - int(member[4][0].text), - int(member[4][1].text), - int(member[4][2].text), - int(member[4][3].text) - ) - xml_list.append(value) - column_name = ['filename', 'width', 'height', - 'class', 'xmin', 'ymin', 'xmax', 'ymax'] - xml_df = pd.DataFrame(xml_list, columns=column_name) - return xml_df - - -def class_text_to_int(row_label): +def class_text_to_int(row_label, label_map_dict): return label_map_dict[row_label] - def split(df, group): data = namedtuple('data', ['filename', 'object']) gb = df.groupby(group) return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)] - -def create_tf_example(group, path): - with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: +def create_tf_example(group, path, label_map_dict): + with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) @@ -129,7 +55,7 @@ def create_tf_example(group, path): ymins.append(row['ymin'] / height) ymaxs.append(row['ymax'] / height) classes_text.append(row['class'].encode('utf8')) - classes.append(class_text_to_int(row['class'])) + classes.append(class_text_to_int(row['class'], label_map_dict)) tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), @@ -147,22 +73,19 @@ def create_tf_example(group, path): })) return tf_example - -def main(_): - - writer = tf.python_io.TFRecordWriter(args.output_path) +def main(): + writer = tf.io.TFRecordWriter(args.output_path) path = os.path.join(args.image_dir) - examples = xml_to_csv(args.xml_dir) + examples = pd.read_csv(args.csv_input) + label_map_dict = label_map_util.get_label_map_dict(args.label_map) grouped = split(examples, 'filename') + for group in grouped: - tf_example = create_tf_example(group, path) + tf_example = create_tf_example(group, path, label_map_dict) writer.write(tf_example.SerializeToString()) + writer.close() - print('Successfully created the TFRecord file: {}'.format(args.output_path)) - if args.csv_path is not None: - examples.to_csv(args.csv_path, index=None) - print('Successfully created the CSV file: {}'.format(args.csv_path)) - + print(f'Successfully created the TFRecord file: {args.output_path}') if __name__ == '__main__': - tf.app.run() + main() \ No newline at end of file diff --git a/Tensorflow/workspace/annotations/label_map.pbtxt b/Tensorflow/workspace/annotations/label_map.pbtxt new file mode 100644 index 00000000..5f56b329 --- /dev/null +++ b/Tensorflow/workspace/annotations/label_map.pbtxt @@ -0,0 +1,20 @@ +item { + id: 1 + name: 'hello' +} +item { + id: 2 + name: 'iloveyou' +} +item { + id: 3 + name: 'no' +} +item { + id: 4 + name: 'thanks' +} +item { + id: 5 + name: 'yes' +} \ No newline at end of file diff --git a/Tensorflow/workspace/annotations/test.record b/Tensorflow/workspace/annotations/test.record new file mode 100644 index 00000000..849918a7 Binary files /dev/null and b/Tensorflow/workspace/annotations/test.record differ diff --git 
a/Tensorflow/workspace/annotations/test_labels.csv b/Tensorflow/workspace/annotations/test_labels.csv new file mode 100644 index 00000000..347dde05 --- /dev/null +++ b/Tensorflow/workspace/annotations/test_labels.csv @@ -0,0 +1,16 @@ +filename,width,height,class,xmin,ymin,xmax,ymax +hello.25228e95-bac4-11f0-8c23-5cbaef8df640.jpg,640,480,hello,406,206,640,480 +hello.26563750-bac4-11f0-acdb-5cbaef8df640.jpg,640,480,hello,381,94,640,480 +hello.27896177-bac4-11f0-a12f-5cbaef8df640.jpg,640,480,hello,318,107,620,480 +iloveyou.82984a8e-bac4-11f0-bcd1-5cbaef8df640.jpg,640,480,iloveyou,135,221,394,462 +iloveyou.85088187-bac4-11f0-a265-5cbaef8df640.jpg,640,480,iloveyou,147,181,425,469 +iloveyou.9130b2f5-bac4-11f0-9ee5-5cbaef8df640.jpg,640,480,iloveyou,149,163,447,445 +no.68417c0d-bac4-11f0-862c-5cbaef8df640.jpg,640,480,no,43,211,279,417 +no.6974adaf-bac4-11f0-980e-5cbaef8df640.jpg,640,480,no,89,246,359,453 +no.75834004-bac4-11f0-a7fa-5cbaef8df640.jpg,640,480,no,379,221,569,403 +thanks.398709fd-bac4-11f0-8cb2-5cbaef8df640.jpg,640,480,thanks,154,145,582,454 +thanks.4332f3a3-bac4-11f0-918e-5cbaef8df640.jpg,640,480,thanks,245,146,614,401 +thanks.44660a79-bac4-11f0-a87b-5cbaef8df640.jpg,640,480,thanks,120,130,474,378 +yes.516242a6-bac4-11f0-be17-5cbaef8df640.jpg,640,480,yes,71,209,295,443 +yes.57614b4f-bac4-11f0-bf72-5cbaef8df640.jpg,640,480,yes,496,204,640,436 +yes.5897143a-bac4-11f0-a547-5cbaef8df640.jpg,640,480,yes,513,159,640,395 diff --git a/Tensorflow/workspace/annotations/train.record b/Tensorflow/workspace/annotations/train.record new file mode 100644 index 00000000..fdecdc42 Binary files /dev/null and b/Tensorflow/workspace/annotations/train.record differ diff --git a/Tensorflow/workspace/annotations/train_labels.csv b/Tensorflow/workspace/annotations/train_labels.csv new file mode 100644 index 00000000..6e03da97 --- /dev/null +++ b/Tensorflow/workspace/annotations/train_labels.csv @@ -0,0 +1,61 @@ +filename,width,height,class,xmin,ymin,xmax,ymax +hello.1b79ecab-bac4-11f0-bf5a-5cbaef8df640.jpg,640,480,hello,141,185,385,480 +hello.1cb18098-bac4-11f0-9c2e-5cbaef8df640.jpg,640,480,hello,25,165,290,479 +hello.1de44c2e-bac4-11f0-9a08-5cbaef8df640.jpg,640,480,hello,1,127,236,480 +hello.1f1f9417-bac4-11f0-bf1c-5cbaef8df640.jpg,640,480,hello,132,300,366,480 +hello.20527c60-bac4-11f0-948e-5cbaef8df640.jpg,640,480,hello,197,205,473,480 +hello.2185addc-bac4-11f0-83fa-5cbaef8df640.jpg,640,480,hello,81,164,353,480 +hello.22b883c9-bac4-11f0-9c57-5cbaef8df640.jpg,640,480,hello,351,242,624,479 +hello.23eb6911-bac4-11f0-b1d3-5cbaef8df640.jpg,640,480,hello,390,213,640,480 +hello.28bc669c-bac4-11f0-af1e-5cbaef8df640.jpg,640,480,hello,274,183,531,480 +hello.29ef7d3c-bac4-11f0-ab70-5cbaef8df640.jpg,640,480,hello,351,133,636,477 +hello.2b22b5f1-bac4-11f0-a320-5cbaef8df640.jpg,640,480,hello,247,246,507,477 +hello.2c592c1a-bac4-11f0-a7cc-5cbaef8df640.jpg,640,480,hello,272,220,546,479 +iloveyou.802dd6fc-bac4-11f0-91ae-5cbaef8df640.jpg,640,480,iloveyou,70,179,357,478 +iloveyou.8160b525-bac4-11f0-96f4-5cbaef8df640.jpg,640,480,iloveyou,75,155,345,452 +iloveyou.83d03178-bac4-11f0-a4d2-5cbaef8df640.jpg,640,480,iloveyou,130,143,380,460 +iloveyou.863bb039-bac4-11f0-a027-5cbaef8df640.jpg,640,480,iloveyou,149,151,420,438 +iloveyou.876ed591-bac4-11f0-b414-5cbaef8df640.jpg,640,480,iloveyou,20,165,279,478 +iloveyou.88a21c55-bac4-11f0-a332-5cbaef8df640.jpg,640,480,iloveyou,245,89,561,384 +iloveyou.89de8c24-bac4-11f0-816c-5cbaef8df640.jpg,640,480,iloveyou,187,167,452,416 
+iloveyou.8b190010-bac4-11f0-89ac-5cbaef8df640.jpg,640,480,iloveyou,309,156,586,406 +iloveyou.8c551a2e-bac4-11f0-ae2a-5cbaef8df640.jpg,640,480,iloveyou,269,121,518,379 +iloveyou.8d90dcb0-bac4-11f0-a75f-5cbaef8df640.jpg,640,480,iloveyou,339,101,608,381 +iloveyou.8ec39275-bac4-11f0-9943-5cbaef8df640.jpg,640,480,iloveyou,353,206,618,446 +iloveyou.8ff66380-bac4-11f0-8ac3-5cbaef8df640.jpg,640,480,iloveyou,180,184,454,450 +no.670e6e7e-bac4-11f0-b29f-5cbaef8df640.jpg,640,480,no,128,202,401,449 +no.6aafd544-bac4-11f0-9bd0-5cbaef8df640.jpg,640,480,no,95,181,332,399 +no.6be2ed99-bac4-11f0-a855-5cbaef8df640.jpg,640,480,no,130,196,365,412 +no.6d1d6102-bac4-11f0-b3bb-5cbaef8df640.jpg,640,480,no,144,256,376,448 +no.6e504eae-bac4-11f0-a6a6-5cbaef8df640.jpg,640,480,no,408,175,606,384 +no.6f83445d-bac4-11f0-8e39-5cbaef8df640.jpg,640,480,no,412,215,640,445 +no.70b638d7-bac4-11f0-b084-5cbaef8df640.jpg,640,480,no,263,221,553,442 +no.71e8e6bd-bac4-11f0-af2a-5cbaef8df640.jpg,640,480,no,358,237,561,407 +no.731c0dda-bac4-11f0-ad54-5cbaef8df640.jpg,640,480,no,186,290,431,480 +no.744f306e-bac4-11f0-9ab7-5cbaef8df640.jpg,640,480,no,199,274,451,453 +no.76b66b2e-bac4-11f0-8862-5cbaef8df640.jpg,640,480,no,357,220,567,411 +no.77eeede0-bac4-11f0-ad40-5cbaef8df640.jpg,640,480,no,359,332,576,480 +thanks.34b2a8d7-bac4-11f0-bcb4-5cbaef8df640.jpg,640,480,thanks,220,161,594,422 +thanks.35e5dd7d-bac4-11f0-ba11-5cbaef8df640.jpg,640,480,thanks,112,168,460,436 +thanks.3718a6e5-bac4-11f0-9e98-5cbaef8df640.jpg,640,480,thanks,181,164,542,430 +thanks.3853fd0e-bac4-11f0-b0c6-5cbaef8df640.jpg,640,480,thanks,218,162,624,448 +thanks.3ac0e6b6-bac4-11f0-b016-5cbaef8df640.jpg,640,480,thanks,110,161,484,452 +thanks.3bf93d6d-bac4-11f0-b8ca-5cbaef8df640.jpg,640,480,thanks,206,129,555,401 +thanks.3d2c85c1-bac4-11f0-a73c-5cbaef8df640.jpg,640,480,thanks,186,142,554,418 +thanks.3e5f99c3-bac4-11f0-95eb-5cbaef8df640.jpg,640,480,thanks,215,145,555,410 +thanks.3f9a0441-bac4-11f0-911f-5cbaef8df640.jpg,640,480,thanks,155,126,510,392 +thanks.40ccf7c0-bac4-11f0-af20-5cbaef8df640.jpg,640,480,thanks,237,159,595,451 +thanks.41ffd10f-bac4-11f0-a88e-5cbaef8df640.jpg,640,480,thanks,74,131,467,385 +thanks.45a0b4a4-bac4-11f0-81a3-5cbaef8df640.jpg,640,480,thanks,254,177,636,455 +yes.4dc27c96-bac4-11f0-a869-5cbaef8df640.jpg,640,480,yes,82,202,371,446 +yes.4ef54869-bac4-11f0-b6c7-5cbaef8df640.jpg,640,480,yes,99,244,359,480 +yes.5028f404-bac4-11f0-9cc2-5cbaef8df640.jpg,640,480,yes,126,227,388,466 +yes.52955cc7-bac4-11f0-9267-5cbaef8df640.jpg,640,480,yes,90,152,344,355 +yes.53c8132e-bac4-11f0-9127-5cbaef8df640.jpg,640,480,yes,171,159,441,407 +yes.54fadd9e-bac4-11f0-b8f2-5cbaef8df640.jpg,640,480,yes,11,146,248,376 +yes.562e2350-bac4-11f0-b951-5cbaef8df640.jpg,640,480,yes,4,190,260,385 +yes.59cd997e-bac4-11f0-b5ad-5cbaef8df640.jpg,640,480,yes,243,236,481,460 +yes.5b00a5b1-bac4-11f0-b2fb-5cbaef8df640.jpg,640,480,yes,406,304,607,480 +yes.5c3397f9-bac4-11f0-b78d-5cbaef8df640.jpg,640,480,yes,318,275,609,480 +yes.5d6a1bc8-bac4-11f0-a353-5cbaef8df640.jpg,640,480,yes,304,223,551,430 +yes.5e9feab9-bac4-11f0-8f3d-5cbaef8df640.jpg,640,480,yes,232,234,496,474 diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.1b79ecab-bac4-11f0-bf5a-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.1b79ecab-bac4-11f0-bf5a-5cbaef8df640.jpg new file mode 100644 index 00000000..465b284a Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.1b79ecab-bac4-11f0-bf5a-5cbaef8df640.jpg differ diff --git 
a/Tensorflow/workspace/images/collectedimages/hello/hello.1cb18098-bac4-11f0-9c2e-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.1cb18098-bac4-11f0-9c2e-5cbaef8df640.jpg new file mode 100644 index 00000000..2ac6de1d Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.1cb18098-bac4-11f0-9c2e-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.1de44c2e-bac4-11f0-9a08-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.1de44c2e-bac4-11f0-9a08-5cbaef8df640.jpg new file mode 100644 index 00000000..13ec7eb8 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.1de44c2e-bac4-11f0-9a08-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.1f1f9417-bac4-11f0-bf1c-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.1f1f9417-bac4-11f0-bf1c-5cbaef8df640.jpg new file mode 100644 index 00000000..7da9b3b6 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.1f1f9417-bac4-11f0-bf1c-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.20527c60-bac4-11f0-948e-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.20527c60-bac4-11f0-948e-5cbaef8df640.jpg new file mode 100644 index 00000000..62fa9558 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.20527c60-bac4-11f0-948e-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.2185addc-bac4-11f0-83fa-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.2185addc-bac4-11f0-83fa-5cbaef8df640.jpg new file mode 100644 index 00000000..69a7f867 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.2185addc-bac4-11f0-83fa-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.22b883c9-bac4-11f0-9c57-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.22b883c9-bac4-11f0-9c57-5cbaef8df640.jpg new file mode 100644 index 00000000..e755e188 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.22b883c9-bac4-11f0-9c57-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.23eb6911-bac4-11f0-b1d3-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.23eb6911-bac4-11f0-b1d3-5cbaef8df640.jpg new file mode 100644 index 00000000..c322e48f Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.23eb6911-bac4-11f0-b1d3-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.25228e95-bac4-11f0-8c23-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.25228e95-bac4-11f0-8c23-5cbaef8df640.jpg new file mode 100644 index 00000000..bcb65d33 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.25228e95-bac4-11f0-8c23-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.26563750-bac4-11f0-acdb-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.26563750-bac4-11f0-acdb-5cbaef8df640.jpg new file mode 100644 index 00000000..0da32204 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.26563750-bac4-11f0-acdb-5cbaef8df640.jpg differ diff --git 
a/Tensorflow/workspace/images/collectedimages/hello/hello.27896177-bac4-11f0-a12f-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.27896177-bac4-11f0-a12f-5cbaef8df640.jpg new file mode 100644 index 00000000..96c67c86 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.27896177-bac4-11f0-a12f-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.28bc669c-bac4-11f0-af1e-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.28bc669c-bac4-11f0-af1e-5cbaef8df640.jpg new file mode 100644 index 00000000..42bfabc7 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.28bc669c-bac4-11f0-af1e-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.29ef7d3c-bac4-11f0-ab70-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.29ef7d3c-bac4-11f0-ab70-5cbaef8df640.jpg new file mode 100644 index 00000000..dc7d228e Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.29ef7d3c-bac4-11f0-ab70-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.2b22b5f1-bac4-11f0-a320-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.2b22b5f1-bac4-11f0-a320-5cbaef8df640.jpg new file mode 100644 index 00000000..7f680f54 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.2b22b5f1-bac4-11f0-a320-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/hello/hello.2c592c1a-bac4-11f0-a7cc-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/hello/hello.2c592c1a-bac4-11f0-a7cc-5cbaef8df640.jpg new file mode 100644 index 00000000..d50dd7cb Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/hello/hello.2c592c1a-bac4-11f0-a7cc-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.802dd6fc-bac4-11f0-91ae-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.802dd6fc-bac4-11f0-91ae-5cbaef8df640.jpg new file mode 100644 index 00000000..9e4afefd Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.802dd6fc-bac4-11f0-91ae-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8160b525-bac4-11f0-96f4-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8160b525-bac4-11f0-96f4-5cbaef8df640.jpg new file mode 100644 index 00000000..579efaf5 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8160b525-bac4-11f0-96f4-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.82984a8e-bac4-11f0-bcd1-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.82984a8e-bac4-11f0-bcd1-5cbaef8df640.jpg new file mode 100644 index 00000000..b238986b Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.82984a8e-bac4-11f0-bcd1-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.83d03178-bac4-11f0-a4d2-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.83d03178-bac4-11f0-a4d2-5cbaef8df640.jpg new file mode 100644 index 00000000..f0cfd69d Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.83d03178-bac4-11f0-a4d2-5cbaef8df640.jpg differ 
diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.85088187-bac4-11f0-a265-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.85088187-bac4-11f0-a265-5cbaef8df640.jpg new file mode 100644 index 00000000..0591bf8e Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.85088187-bac4-11f0-a265-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.863bb039-bac4-11f0-a027-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.863bb039-bac4-11f0-a027-5cbaef8df640.jpg new file mode 100644 index 00000000..c01c3467 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.863bb039-bac4-11f0-a027-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.876ed591-bac4-11f0-b414-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.876ed591-bac4-11f0-b414-5cbaef8df640.jpg new file mode 100644 index 00000000..a71834a7 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.876ed591-bac4-11f0-b414-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.88a21c55-bac4-11f0-a332-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.88a21c55-bac4-11f0-a332-5cbaef8df640.jpg new file mode 100644 index 00000000..383d5f25 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.88a21c55-bac4-11f0-a332-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.89de8c24-bac4-11f0-816c-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.89de8c24-bac4-11f0-816c-5cbaef8df640.jpg new file mode 100644 index 00000000..8416a34e Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.89de8c24-bac4-11f0-816c-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8b190010-bac4-11f0-89ac-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8b190010-bac4-11f0-89ac-5cbaef8df640.jpg new file mode 100644 index 00000000..a9288abb Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8b190010-bac4-11f0-89ac-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8c551a2e-bac4-11f0-ae2a-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8c551a2e-bac4-11f0-ae2a-5cbaef8df640.jpg new file mode 100644 index 00000000..0343c0aa Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8c551a2e-bac4-11f0-ae2a-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8d90dcb0-bac4-11f0-a75f-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8d90dcb0-bac4-11f0-a75f-5cbaef8df640.jpg new file mode 100644 index 00000000..05cb77bb Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8d90dcb0-bac4-11f0-a75f-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8ec39275-bac4-11f0-9943-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8ec39275-bac4-11f0-9943-5cbaef8df640.jpg new file mode 100644 index 00000000..d226f7f1 Binary files /dev/null and 
b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8ec39275-bac4-11f0-9943-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8ff66380-bac4-11f0-8ac3-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8ff66380-bac4-11f0-8ac3-5cbaef8df640.jpg new file mode 100644 index 00000000..f82b88e2 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.8ff66380-bac4-11f0-8ac3-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.9130b2f5-bac4-11f0-9ee5-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.9130b2f5-bac4-11f0-9ee5-5cbaef8df640.jpg new file mode 100644 index 00000000..2d603177 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/iloveyou/iloveyou.9130b2f5-bac4-11f0-9ee5-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.670e6e7e-bac4-11f0-b29f-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.670e6e7e-bac4-11f0-b29f-5cbaef8df640.jpg new file mode 100644 index 00000000..acef8a5a Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.670e6e7e-bac4-11f0-b29f-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.68417c0d-bac4-11f0-862c-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.68417c0d-bac4-11f0-862c-5cbaef8df640.jpg new file mode 100644 index 00000000..7b782b46 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.68417c0d-bac4-11f0-862c-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.6974adaf-bac4-11f0-980e-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.6974adaf-bac4-11f0-980e-5cbaef8df640.jpg new file mode 100644 index 00000000..64a589aa Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.6974adaf-bac4-11f0-980e-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.6aafd544-bac4-11f0-9bd0-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.6aafd544-bac4-11f0-9bd0-5cbaef8df640.jpg new file mode 100644 index 00000000..9f2786a0 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.6aafd544-bac4-11f0-9bd0-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.6be2ed99-bac4-11f0-a855-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.6be2ed99-bac4-11f0-a855-5cbaef8df640.jpg new file mode 100644 index 00000000..98e0e671 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.6be2ed99-bac4-11f0-a855-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.6d1d6102-bac4-11f0-b3bb-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.6d1d6102-bac4-11f0-b3bb-5cbaef8df640.jpg new file mode 100644 index 00000000..5ce95dd3 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.6d1d6102-bac4-11f0-b3bb-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.6e504eae-bac4-11f0-a6a6-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.6e504eae-bac4-11f0-a6a6-5cbaef8df640.jpg new file mode 100644 index 00000000..640efb51 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.6e504eae-bac4-11f0-a6a6-5cbaef8df640.jpg differ diff --git 
a/Tensorflow/workspace/images/collectedimages/no/no.6f83445d-bac4-11f0-8e39-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.6f83445d-bac4-11f0-8e39-5cbaef8df640.jpg new file mode 100644 index 00000000..d70c92f0 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.6f83445d-bac4-11f0-8e39-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.70b638d7-bac4-11f0-b084-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.70b638d7-bac4-11f0-b084-5cbaef8df640.jpg new file mode 100644 index 00000000..20cf741f Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.70b638d7-bac4-11f0-b084-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.71e8e6bd-bac4-11f0-af2a-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.71e8e6bd-bac4-11f0-af2a-5cbaef8df640.jpg new file mode 100644 index 00000000..fdcba79d Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.71e8e6bd-bac4-11f0-af2a-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.731c0dda-bac4-11f0-ad54-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.731c0dda-bac4-11f0-ad54-5cbaef8df640.jpg new file mode 100644 index 00000000..11f9b236 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.731c0dda-bac4-11f0-ad54-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.744f306e-bac4-11f0-9ab7-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.744f306e-bac4-11f0-9ab7-5cbaef8df640.jpg new file mode 100644 index 00000000..f09d5a96 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.744f306e-bac4-11f0-9ab7-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.75834004-bac4-11f0-a7fa-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.75834004-bac4-11f0-a7fa-5cbaef8df640.jpg new file mode 100644 index 00000000..f46e0e8f Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.75834004-bac4-11f0-a7fa-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.76b66b2e-bac4-11f0-8862-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.76b66b2e-bac4-11f0-8862-5cbaef8df640.jpg new file mode 100644 index 00000000..ad6c946a Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.76b66b2e-bac4-11f0-8862-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/no/no.77eeede0-bac4-11f0-ad40-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/no/no.77eeede0-bac4-11f0-ad40-5cbaef8df640.jpg new file mode 100644 index 00000000..453ed4d0 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/no/no.77eeede0-bac4-11f0-ad40-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.34b2a8d7-bac4-11f0-bcb4-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.34b2a8d7-bac4-11f0-bcb4-5cbaef8df640.jpg new file mode 100644 index 00000000..22acfe47 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.34b2a8d7-bac4-11f0-bcb4-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.35e5dd7d-bac4-11f0-ba11-5cbaef8df640.jpg 
b/Tensorflow/workspace/images/collectedimages/thanks/thanks.35e5dd7d-bac4-11f0-ba11-5cbaef8df640.jpg new file mode 100644 index 00000000..b4de2932 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.35e5dd7d-bac4-11f0-ba11-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.3718a6e5-bac4-11f0-9e98-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3718a6e5-bac4-11f0-9e98-5cbaef8df640.jpg new file mode 100644 index 00000000..8d3ec5ac Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3718a6e5-bac4-11f0-9e98-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.3853fd0e-bac4-11f0-b0c6-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3853fd0e-bac4-11f0-b0c6-5cbaef8df640.jpg new file mode 100644 index 00000000..05558bee Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3853fd0e-bac4-11f0-b0c6-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.398709fd-bac4-11f0-8cb2-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.398709fd-bac4-11f0-8cb2-5cbaef8df640.jpg new file mode 100644 index 00000000..b23894d7 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.398709fd-bac4-11f0-8cb2-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.3ac0e6b6-bac4-11f0-b016-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3ac0e6b6-bac4-11f0-b016-5cbaef8df640.jpg new file mode 100644 index 00000000..6225f04b Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3ac0e6b6-bac4-11f0-b016-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.3bf93d6d-bac4-11f0-b8ca-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3bf93d6d-bac4-11f0-b8ca-5cbaef8df640.jpg new file mode 100644 index 00000000..b763e875 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3bf93d6d-bac4-11f0-b8ca-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.3d2c85c1-bac4-11f0-a73c-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3d2c85c1-bac4-11f0-a73c-5cbaef8df640.jpg new file mode 100644 index 00000000..19305280 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3d2c85c1-bac4-11f0-a73c-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.3e5f99c3-bac4-11f0-95eb-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3e5f99c3-bac4-11f0-95eb-5cbaef8df640.jpg new file mode 100644 index 00000000..6b3fe6ae Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3e5f99c3-bac4-11f0-95eb-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.3f9a0441-bac4-11f0-911f-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3f9a0441-bac4-11f0-911f-5cbaef8df640.jpg new file mode 100644 index 00000000..0384fffe Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.3f9a0441-bac4-11f0-911f-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.40ccf7c0-bac4-11f0-af20-5cbaef8df640.jpg 
b/Tensorflow/workspace/images/collectedimages/thanks/thanks.40ccf7c0-bac4-11f0-af20-5cbaef8df640.jpg new file mode 100644 index 00000000..86edf677 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.40ccf7c0-bac4-11f0-af20-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.41ffd10f-bac4-11f0-a88e-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.41ffd10f-bac4-11f0-a88e-5cbaef8df640.jpg new file mode 100644 index 00000000..29460b3f Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.41ffd10f-bac4-11f0-a88e-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.4332f3a3-bac4-11f0-918e-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.4332f3a3-bac4-11f0-918e-5cbaef8df640.jpg new file mode 100644 index 00000000..c62690f1 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.4332f3a3-bac4-11f0-918e-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.44660a79-bac4-11f0-a87b-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.44660a79-bac4-11f0-a87b-5cbaef8df640.jpg new file mode 100644 index 00000000..d5fc2a7d Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.44660a79-bac4-11f0-a87b-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/thanks/thanks.45a0b4a4-bac4-11f0-81a3-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/thanks/thanks.45a0b4a4-bac4-11f0-81a3-5cbaef8df640.jpg new file mode 100644 index 00000000..15a56df1 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/thanks/thanks.45a0b4a4-bac4-11f0-81a3-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.4dc27c96-bac4-11f0-a869-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.4dc27c96-bac4-11f0-a869-5cbaef8df640.jpg new file mode 100644 index 00000000..026685e3 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.4dc27c96-bac4-11f0-a869-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.4ef54869-bac4-11f0-b6c7-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.4ef54869-bac4-11f0-b6c7-5cbaef8df640.jpg new file mode 100644 index 00000000..313cf882 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.4ef54869-bac4-11f0-b6c7-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.5028f404-bac4-11f0-9cc2-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.5028f404-bac4-11f0-9cc2-5cbaef8df640.jpg new file mode 100644 index 00000000..a8c68d8c Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.5028f404-bac4-11f0-9cc2-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.516242a6-bac4-11f0-be17-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.516242a6-bac4-11f0-be17-5cbaef8df640.jpg new file mode 100644 index 00000000..0b1ebc01 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.516242a6-bac4-11f0-be17-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.52955cc7-bac4-11f0-9267-5cbaef8df640.jpg 
b/Tensorflow/workspace/images/collectedimages/yes/yes.52955cc7-bac4-11f0-9267-5cbaef8df640.jpg new file mode 100644 index 00000000..7d98df9e Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.52955cc7-bac4-11f0-9267-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.53c8132e-bac4-11f0-9127-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.53c8132e-bac4-11f0-9127-5cbaef8df640.jpg new file mode 100644 index 00000000..046a4ed2 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.53c8132e-bac4-11f0-9127-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.54fadd9e-bac4-11f0-b8f2-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.54fadd9e-bac4-11f0-b8f2-5cbaef8df640.jpg new file mode 100644 index 00000000..467a6bfc Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.54fadd9e-bac4-11f0-b8f2-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.562e2350-bac4-11f0-b951-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.562e2350-bac4-11f0-b951-5cbaef8df640.jpg new file mode 100644 index 00000000..b4e5a5a8 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.562e2350-bac4-11f0-b951-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.57614b4f-bac4-11f0-bf72-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.57614b4f-bac4-11f0-bf72-5cbaef8df640.jpg new file mode 100644 index 00000000..18ad9b68 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.57614b4f-bac4-11f0-bf72-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.5897143a-bac4-11f0-a547-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.5897143a-bac4-11f0-a547-5cbaef8df640.jpg new file mode 100644 index 00000000..b9162008 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.5897143a-bac4-11f0-a547-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.59cd997e-bac4-11f0-b5ad-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.59cd997e-bac4-11f0-b5ad-5cbaef8df640.jpg new file mode 100644 index 00000000..cf4548e0 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.59cd997e-bac4-11f0-b5ad-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.5b00a5b1-bac4-11f0-b2fb-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.5b00a5b1-bac4-11f0-b2fb-5cbaef8df640.jpg new file mode 100644 index 00000000..d06ed276 Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.5b00a5b1-bac4-11f0-b2fb-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.5c3397f9-bac4-11f0-b78d-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.5c3397f9-bac4-11f0-b78d-5cbaef8df640.jpg new file mode 100644 index 00000000..ddccfdfe Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.5c3397f9-bac4-11f0-b78d-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.5d6a1bc8-bac4-11f0-a353-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.5d6a1bc8-bac4-11f0-a353-5cbaef8df640.jpg new file mode 100644 index 00000000..feac9fac Binary files /dev/null and 
b/Tensorflow/workspace/images/collectedimages/yes/yes.5d6a1bc8-bac4-11f0-a353-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/images/collectedimages/yes/yes.5e9feab9-bac4-11f0-8f3d-5cbaef8df640.jpg b/Tensorflow/workspace/images/collectedimages/yes/yes.5e9feab9-bac4-11f0-8f3d-5cbaef8df640.jpg new file mode 100644 index 00000000..7653e23c Binary files /dev/null and b/Tensorflow/workspace/images/collectedimages/yes/yes.5e9feab9-bac4-11f0-8f3d-5cbaef8df640.jpg differ diff --git a/Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config b/Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config new file mode 100644 index 00000000..fc151ecb --- /dev/null +++ b/Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config @@ -0,0 +1,191 @@ +model { + ssd { + num_classes: 90 + image_resizer { + fixed_shape_resizer { + height: 320 + width: 320 + } + } + feature_extractor { + type: "ssd_mobilenet_v2_fpn_keras" + depth_multiplier: 1.0 + min_depth: 16 + conv_hyperparams { + regularizer { + l2_regularizer { + weight: 3.9999998989515007e-05 + } + } + initializer { + random_normal_initializer { + mean: 0.0 + stddev: 0.009999999776482582 + } + } + activation: RELU_6 + batch_norm { + decay: 0.996999979019165 + scale: true + epsilon: 0.0010000000474974513 + } + } + use_depthwise: true + override_base_feature_extractor_hyperparams: true + fpn { + min_level: 3 + max_level: 7 + additional_layer_depth: 128 + } + } + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + weight: 3.9999998989515007e-05 + } + } + initializer { + random_normal_initializer { + mean: 0.0 + stddev: 0.009999999776482582 + } + } + activation: RELU_6 + batch_norm { + decay: 0.996999979019165 + scale: true + epsilon: 0.0010000000474974513 + } + } + depth: 128 + num_layers_before_predictor: 4 + kernel_size: 3 + class_prediction_bias_init: -4.599999904632568 + share_prediction_tower: true + use_depthwise: true + } + } + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: 1.0 + aspect_ratios: 2.0 + aspect_ratios: 0.5 + scales_per_octave: 2 + } + } + post_processing { + batch_non_max_suppression { + score_threshold: 9.99999993922529e-09 + iou_threshold: 0.6000000238418579 + max_detections_per_class: 100 + max_total_detections: 100 + use_static_shapes: false + } + score_converter: SIGMOID + } + normalize_loss_by_num_matches: true + loss { + localization_loss { + weighted_smooth_l1 { + } + } + classification_loss { + weighted_sigmoid_focal { + gamma: 2.0 + alpha: 0.25 + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + encode_background_as_zeros: true + normalize_loc_loss_by_codesize: true + inplace_batchnorm_update: true + freeze_batchnorm: false + } +} +train_config { + batch_size: 128 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + sync_replicas: true + optimizer { + momentum_optimizer { + 
learning_rate { + cosine_decay_learning_rate { + learning_rate_base: 0.07999999821186066 + total_steps: 50000 + warmup_learning_rate: 0.026666000485420227 + warmup_steps: 1000 + } + } + momentum_optimizer_value: 0.8999999761581421 + } + use_moving_average: false + } + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + num_steps: 50000 + startup_delay_steps: 0.0 + replicas_to_aggregate: 8 + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + fine_tune_checkpoint_type: "classification" + fine_tune_checkpoint_version: V2 +} +train_input_reader { + label_map_path: "PATH_TO_BE_CONFIGURED" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED" + } +} +eval_config { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} +eval_input_reader { + label_map_path: "PATH_TO_BE_CONFIGURED" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED" + } +} diff --git a/Training.ipynb b/Training.ipynb new file mode 100644 index 00000000..6f210a8c --- /dev/null +++ b/Training.ipynb @@ -0,0 +1,540 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "7f343d88-aad0-4ecf-9156-77303387c13a", + "metadata": {}, + "outputs": [], + "source": [ + "WORKSPACE_PATH = 'Tensorflow/workspace'\n", + "SCRIPTS_PATH = 'Tensorflow/scripts'\n", + "APIMODEL_PATH = 'Tensorflow/models'\n", + "ANNOTATION_PATH = WORKSPACE_PATH+'/annotations'\n", + "IMAGE_PATH = WORKSPACE_PATH+'/images'\n", + "MODEL_PATH = WORKSPACE_PATH+'/models'\n", + "PRETRAINED_MODEL_PATH = WORKSPACE_PATH+'/pre-trained-models'\n", + "CONFIG_PATH = MODEL_PATH+'/my_ssd_mobnet/pipeline.config'\n", + "CHECKPOINT_PATH = MODEL_PATH+'/my_ssd_mobnet/'" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "620c6454-448c-434f-9cb5-e6fe3d34ec45", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "fatal: destination path 'models' already exists and is not an empty directory.\r\n" + ] + } + ], + "source": [ + "!cd Tensorflow && git clone https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1731735c-1574-443f-94b3-9248b6661d5c", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/bin/sh: 1: brew: not found\r\n" + ] + } + ], + "source": [ + "!brew install wget\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "52c2ab27-b439-4c9d-80f0-d27854f950b2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/bin/sh: 1: wget: not found\r\n" + ] + } + ], + "source": [ + "#Download the pre-trained SSD MobileNet model\n", + "!wget http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "0053edcd-9137-4763-959d-35447ec13a16", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mv: cannot stat 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz': No such file or directory\r\n" + ] + } + ], + "source": [ + "#Move the tar.gz file to your pre-trained-models folder\n", + "!mv ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz Tensorflow/workspace/pre-trained-models/\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "b6e3c3ad", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Get:1 
http://security.ubuntu.com/ubuntu bionic-security InRelease [102 kB]\n", + "Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease \n", + "Get:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [102 kB] \n", + "Get:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [102 kB] \n", + "Get:5 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1637 kB]\n", + "Get:6 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [23.8 kB]\n", + "Get:7 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [3373 kB]\n", + "Get:8 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [1688 kB]\n", + "Get:9 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2411 kB]\n", + "Get:10 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [30.8 kB]\n", + "Get:11 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [3786 kB]\n", + "Get:12 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [1728 kB]\n", + "Get:13 http://archive.ubuntu.com/ubuntu bionic-backports/main amd64 Packages [64.0 kB]\n", + "Get:14 http://archive.ubuntu.com/ubuntu bionic-backports/universe amd64 Packages [20.6 kB]\n", + "Fetched 15.1 MB in 4s (3885 kB/s) \n", + "Reading package lists... Done\n", + "Reading package lists... Done\n", + "Building dependency tree \n", + "Reading state information... Done\n", + "The following NEW packages will be installed:\n", + " wget\n", + "0 upgraded, 1 newly installed, 0 to remove and 113 not upgraded.\n", + "Need to get 316 kB of archives.\n", + "After this operation, 954 kB of additional disk space will be used.\n", + "Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 wget amd64 1.19.4-1ubuntu2.2 [316 kB]\n", + "Fetched 316 kB in 1s (398 kB/s)\n", + "debconf: delaying package configuration, since apt-utils is not installed\n", + "Selecting previously unselected package wget.\n", + "(Reading database ... 
15982 files and directories currently installed.)\n", + "Preparing to unpack .../wget_1.19.4-1ubuntu2.2_amd64.deb ...\n", + "Unpacking wget (1.19.4-1ubuntu2.2) ...\n", + "Setting up wget (1.19.4-1ubuntu2.2) ...\n" + ] + } + ], + "source": [ + "!apt-get update\n", + "!apt-get install -y wget\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "23e84235-c89d-4b38-a7ae-bdf2204a6573", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/ckpt-0.data-00000-of-00001\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/checkpoint\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/ckpt-0.index\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/pipeline.config\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model/\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model/saved_model.pb\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model/variables/\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model/variables/variables.data-00000-of-00001\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model/variables/variables.index\n" + ] + } + ], + "source": [ + "#Extract the contents of the pretrained model\n", + "!cd Tensorflow/workspace/pre-trained-models && tar -xzvf ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "1a6fb550-a507-4c5b-80c4-581ff9f88cf5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34mssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8\u001b[m\u001b[m\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n", + "\n", + "Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8:\n", + "\u001b[34mcheckpoint\u001b[m\u001b[m pipeline.config \u001b[34msaved_model\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint:\n", + "checkpoint ckpt-0.index\n", + "ckpt-0.data-00000-of-00001\n", + "\n", + "Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model:\n", + "saved_model.pb \u001b[34mvariables\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model/variables:\n", + "variables.data-00000-of-00001 variables.index\n" + ] + } + ], + "source": [ + "!ls -R Tensorflow/workspace/pre-trained-models\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "68489bd3-68b6-4ec8-95f5-3f15504baa52", + "metadata": {}, + "outputs": [], + "source": [ + "#STEP 4 — Copy Model Config to Training Folder\n", + "CUSTOM_MODEL_NAME = 'my_ssd_mobnet'\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "cf2bccc5-7971-458a-a56e-0f35cc345edd", + "metadata": {}, + "outputs": [], + "source": [ + "!mkdir -p Tensorflow/workspace/models/{CUSTOM_MODEL_NAME}\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "baabe134-7130-4012-bcf2-43b7423aacad", + "metadata": {}, + "outputs": [], + "source": [ + "!cp Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/pipeline.config \\\n", + "Tensorflow/workspace/models/{CUSTOM_MODEL_NAME}/\n" + ] + }, + { + "cell_type": "code", + "execution_count": 
11, + "id": "d743fb5b-c219-4944-899b-6e011f4007d5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "pipeline.config\n" + ] + } + ], + "source": [ + "!ls Tensorflow/workspace/models/{CUSTOM_MODEL_NAME}\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "3fc89703-2de9-4528-b4c9-cbc2ed1e8be8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "label_map.pbtxt test_labels.csv train_labels.csv\n", + "test.record train.record\n" + ] + } + ], + "source": [ + "!ls Tensorflow/workspace/annotations\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "8bcbd8fb-e9c3-454d-86ba-0604dc7795f9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34mmy_ssd_mobnet\u001b[m\u001b[m\n" + ] + } + ], + "source": [ + "!ls Tensorflow/workspace/models\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8b5c1cd-f873-47b1-b83b-bb44d20a4602", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "#step 5 -Update Config For Transfer Learning\n", + "import tensorflow as tf\n", + "from object_detection.utils import config_util\n", + "from object_detection.protos import pipeline_pb2\n", + "from google.protobuf import text_format\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "fb02de06-1c83-4f2b-843e-958bf94d1695", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34mannotations\u001b[m\u001b[m \u001b[34mimages\u001b[m\u001b[m \u001b[34mmodels\u001b[m\u001b[m \u001b[34mpre-trained-models\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/annotations:\n", + "label_map.pbtxt test_labels.csv train_labels.csv\n", + "test.record train.record\n", + "\n", + "Tensorflow/workspace/images:\n", + "\u001b[34mcollectedimages\u001b[m\u001b[m \u001b[34mtest\u001b[m\u001b[m \u001b[34mtrain\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/images/collectedimages:\n", + "\u001b[34mhello\u001b[m\u001b[m \u001b[34miloveyou\u001b[m\u001b[m \u001b[34mno\u001b[m\u001b[m \u001b[34mthanks\u001b[m\u001b[m \u001b[34myes\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/images/collectedimages/hello:\n", + "hello.1b79ecab-bac4-11f0-bf5a-5cbaef8df640.jpg\n", + "hello.1cb18098-bac4-11f0-9c2e-5cbaef8df640.jpg\n", + "hello.1de44c2e-bac4-11f0-9a08-5cbaef8df640.jpg\n", + "hello.1f1f9417-bac4-11f0-bf1c-5cbaef8df640.jpg\n", + "hello.20527c60-bac4-11f0-948e-5cbaef8df640.jpg\n", + "hello.2185addc-bac4-11f0-83fa-5cbaef8df640.jpg\n", + "hello.22b883c9-bac4-11f0-9c57-5cbaef8df640.jpg\n", + "hello.23eb6911-bac4-11f0-b1d3-5cbaef8df640.jpg\n", + "hello.25228e95-bac4-11f0-8c23-5cbaef8df640.jpg\n", + "hello.26563750-bac4-11f0-acdb-5cbaef8df640.jpg\n", + "hello.27896177-bac4-11f0-a12f-5cbaef8df640.jpg\n", + "hello.28bc669c-bac4-11f0-af1e-5cbaef8df640.jpg\n", + "hello.29ef7d3c-bac4-11f0-ab70-5cbaef8df640.jpg\n", + "hello.2b22b5f1-bac4-11f0-a320-5cbaef8df640.jpg\n", + "hello.2c592c1a-bac4-11f0-a7cc-5cbaef8df640.jpg\n", + "\n", + "Tensorflow/workspace/images/collectedimages/iloveyou:\n", + "iloveyou.802dd6fc-bac4-11f0-91ae-5cbaef8df640.jpg\n", + "iloveyou.8160b525-bac4-11f0-96f4-5cbaef8df640.jpg\n", + "iloveyou.82984a8e-bac4-11f0-bcd1-5cbaef8df640.jpg\n", + "iloveyou.83d03178-bac4-11f0-a4d2-5cbaef8df640.jpg\n", + "iloveyou.85088187-bac4-11f0-a265-5cbaef8df640.jpg\n", + "iloveyou.863bb039-bac4-11f0-a027-5cbaef8df640.jpg\n", + 
"iloveyou.876ed591-bac4-11f0-b414-5cbaef8df640.jpg\n", + "iloveyou.88a21c55-bac4-11f0-a332-5cbaef8df640.jpg\n", + "iloveyou.89de8c24-bac4-11f0-816c-5cbaef8df640.jpg\n", + "iloveyou.8b190010-bac4-11f0-89ac-5cbaef8df640.jpg\n", + "iloveyou.8c551a2e-bac4-11f0-ae2a-5cbaef8df640.jpg\n", + "iloveyou.8d90dcb0-bac4-11f0-a75f-5cbaef8df640.jpg\n", + "iloveyou.8ec39275-bac4-11f0-9943-5cbaef8df640.jpg\n", + "iloveyou.8ff66380-bac4-11f0-8ac3-5cbaef8df640.jpg\n", + "iloveyou.9130b2f5-bac4-11f0-9ee5-5cbaef8df640.jpg\n", + "\n", + "Tensorflow/workspace/images/collectedimages/no:\n", + "no.670e6e7e-bac4-11f0-b29f-5cbaef8df640.jpg\n", + "no.68417c0d-bac4-11f0-862c-5cbaef8df640.jpg\n", + "no.6974adaf-bac4-11f0-980e-5cbaef8df640.jpg\n", + "no.6aafd544-bac4-11f0-9bd0-5cbaef8df640.jpg\n", + "no.6be2ed99-bac4-11f0-a855-5cbaef8df640.jpg\n", + "no.6d1d6102-bac4-11f0-b3bb-5cbaef8df640.jpg\n", + "no.6e504eae-bac4-11f0-a6a6-5cbaef8df640.jpg\n", + "no.6f83445d-bac4-11f0-8e39-5cbaef8df640.jpg\n", + "no.70b638d7-bac4-11f0-b084-5cbaef8df640.jpg\n", + "no.71e8e6bd-bac4-11f0-af2a-5cbaef8df640.jpg\n", + "no.731c0dda-bac4-11f0-ad54-5cbaef8df640.jpg\n", + "no.744f306e-bac4-11f0-9ab7-5cbaef8df640.jpg\n", + "no.75834004-bac4-11f0-a7fa-5cbaef8df640.jpg\n", + "no.76b66b2e-bac4-11f0-8862-5cbaef8df640.jpg\n", + "no.77eeede0-bac4-11f0-ad40-5cbaef8df640.jpg\n", + "\n", + "Tensorflow/workspace/images/collectedimages/thanks:\n", + "thanks.34b2a8d7-bac4-11f0-bcb4-5cbaef8df640.jpg\n", + "thanks.35e5dd7d-bac4-11f0-ba11-5cbaef8df640.jpg\n", + "thanks.3718a6e5-bac4-11f0-9e98-5cbaef8df640.jpg\n", + "thanks.3853fd0e-bac4-11f0-b0c6-5cbaef8df640.jpg\n", + "thanks.398709fd-bac4-11f0-8cb2-5cbaef8df640.jpg\n", + "thanks.3ac0e6b6-bac4-11f0-b016-5cbaef8df640.jpg\n", + "thanks.3bf93d6d-bac4-11f0-b8ca-5cbaef8df640.jpg\n", + "thanks.3d2c85c1-bac4-11f0-a73c-5cbaef8df640.jpg\n", + "thanks.3e5f99c3-bac4-11f0-95eb-5cbaef8df640.jpg\n", + "thanks.3f9a0441-bac4-11f0-911f-5cbaef8df640.jpg\n", + "thanks.40ccf7c0-bac4-11f0-af20-5cbaef8df640.jpg\n", + "thanks.41ffd10f-bac4-11f0-a88e-5cbaef8df640.jpg\n", + "thanks.4332f3a3-bac4-11f0-918e-5cbaef8df640.jpg\n", + "thanks.44660a79-bac4-11f0-a87b-5cbaef8df640.jpg\n", + "thanks.45a0b4a4-bac4-11f0-81a3-5cbaef8df640.jpg\n", + "\n", + "Tensorflow/workspace/images/collectedimages/yes:\n", + "yes.4dc27c96-bac4-11f0-a869-5cbaef8df640.jpg\n", + "yes.4ef54869-bac4-11f0-b6c7-5cbaef8df640.jpg\n", + "yes.5028f404-bac4-11f0-9cc2-5cbaef8df640.jpg\n", + "yes.516242a6-bac4-11f0-be17-5cbaef8df640.jpg\n", + "yes.52955cc7-bac4-11f0-9267-5cbaef8df640.jpg\n", + "yes.53c8132e-bac4-11f0-9127-5cbaef8df640.jpg\n", + "yes.54fadd9e-bac4-11f0-b8f2-5cbaef8df640.jpg\n", + "yes.562e2350-bac4-11f0-b951-5cbaef8df640.jpg\n", + "yes.57614b4f-bac4-11f0-bf72-5cbaef8df640.jpg\n", + "yes.5897143a-bac4-11f0-a547-5cbaef8df640.jpg\n", + "yes.59cd997e-bac4-11f0-b5ad-5cbaef8df640.jpg\n", + "yes.5b00a5b1-bac4-11f0-b2fb-5cbaef8df640.jpg\n", + "yes.5c3397f9-bac4-11f0-b78d-5cbaef8df640.jpg\n", + "yes.5d6a1bc8-bac4-11f0-a353-5cbaef8df640.jpg\n", + "yes.5e9feab9-bac4-11f0-8f3d-5cbaef8df640.jpg\n", + "\n", + "Tensorflow/workspace/images/test:\n", + "\n", + "Tensorflow/workspace/images/train:\n", + "\n", + "Tensorflow/workspace/models:\n", + "\u001b[34mmy_ssd_mobnet\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/models/my_ssd_mobnet:\n", + "pipeline.config\n", + "\n", + "Tensorflow/workspace/pre-trained-models:\n", + "\u001b[34mssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8\u001b[m\u001b[m\n", + "ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n", + "\n", + 
"Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8:\n", + "\u001b[34mcheckpoint\u001b[m\u001b[m pipeline.config \u001b[34msaved_model\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint:\n", + "checkpoint ckpt-0.index\n", + "ckpt-0.data-00000-of-00001\n", + "\n", + "Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model:\n", + "saved_model.pb \u001b[34mvariables\u001b[m\u001b[m\n", + "\n", + "Tensorflow/workspace/pre-trained-models/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/saved_model/variables:\n", + "variables.data-00000-of-00001 variables.index\n" + ] + } + ], + "source": [ + "!ls -R Tensorflow/workspace\n" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "b97969d3-9178-433e-a420-14526f43f181", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "pipeline.config\n" + ] + } + ], + "source": [ + "!ls -R Tensorflow/workspace/models/my_ssd_mobnet\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "70eafea4-2e25-454f-aff1-a9ed924205f9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "item { \n", + " id: 1\n", + " name: 'hello' \n", + "}\n", + "item { \n", + " id: 2\n", + " name: 'iloveyou' \n", + "}\n", + "item { \n", + " id: 3\n", + " name: 'no' \n", + "}\n", + "item { \n", + " id: 4\n", + " name: 'thanks' \n", + "}\n", + "item { \n", + " id: 5\n", + " name: 'yes' \n", + "}\n" + ] + } + ], + "source": [ + "# Show the label map content\n", + "with open('Tensorflow/workspace/annotations/label_map.pbtxt', 'r') as f:\n", + " print(f.read())\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "612d4f49-6a39-4703-a14b-029d6cf74fba", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/Tutorial.ipynb b/Tutorial.ipynb index 99fe06fd..64ca44c2 100644 --- a/Tutorial.ipynb +++ b/Tutorial.ipynb @@ -192,7 +192,10 @@ "cell_type": "code", "execution_count": 56, "metadata": { - "collapsed": true + "collapsed": true, + "jupyter": { + "outputs_hidden": true + } }, "outputs": [ { @@ -627,9 +630,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.4" + "version": "3.6.9" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/Untitled1.ipynb b/Untitled1.ipynb new file mode 100644 index 00000000..426e90b5 --- /dev/null +++ b/Untitled1.ipynb @@ -0,0 +1,42 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow as tf\n", + "tf.__version__\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": 
"text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}