diff --git a/COCO_Evaluation.ipynb b/COCO_Evaluation.ipynb new file mode 100644 index 000000000..ca2e07f50 --- /dev/null +++ b/COCO_Evaluation.ipynb @@ -0,0 +1,576 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "COCO_Evaluation.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true, + "authorship_tag": "ABX9TyPjg5f7zwfebGhai91U15pb", + "include_colab_link": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ANIjJa0SskDI" + }, + "source": [ + "# EfficientDet Evaluation \n", + "\n", + "\n", + "\n", + "
\n", + " \n", + " View source on github\n", + " \n", + "\n", + " \n", + " Run in Google Colab\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "q4xF-IV6tFZF" + }, + "source": [ + "## Installing packages and donwloading source code/image" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "background_save": true + }, + "id": "1uEKAUKjtMXY" + }, + "source": [ + "%%capture\n", + "#@title\n", + "import os\n", + "import sys\n", + "import tensorflow.compat.v1 as tf\n", + "\n", + "# Download source code.\n", + "if \"efficientdet\" not in os.getcwd():\n", + " !git clone --depth 1 https://github.com/google/automl\n", + " os.chdir('automl/efficientdet')\n", + " sys.path.append('.')\n", + " !pip install -r requirements.txt\n", + " !pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'\n", + "else:\n", + " !git pull" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "DEkyGkSftOhv", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "72648260-40e1-4086-ac70-3725e63d2bea" + }, + "source": [ + "MODEL = 'efficientdet-d0' #@param\n", + "# the model name varies from d0 - d7 with increase in evaluation metircs of the model\n", + "def download(m):\n", + " if m not in os.listdir():\n", + " !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{m}.tar.gz\n", + " !tar zxf {m}.tar.gz\n", + " ckpt_path = os.path.join(os.getcwd(), m)\n", + " return ckpt_path\n", + "\n", + "# Download checkpoint.\n", + "ckpt_path = download(MODEL)\n", + "print('Use model in {}'.format(ckpt_path))\n", + "\n", + "# Prepare image and visualization settings.\n", + "image_url = 'https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png'#@param\n", + "image_name = 'img.png' #@param\n", + "!wget {image_url} -O img.png\n", + "import os\n", + "img_path = os.path.join(os.getcwd(), 'img.png')\n", + "\n", + "min_score_thresh = 0.35 #@param\n", + "max_boxes_to_draw = 200 #@param\n", + "line_thickness = 2#@param\n", + "\n", + "import PIL\n", + "# Get the largest of height/width and round to 128.\n", + "image_size = max(PIL.Image.open(img_path).size)" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "text": [ + "--2021-02-15 08:03:41-- https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/efficientdet-d0.tar.gz\n", + "Resolving storage.googleapis.com (storage.googleapis.com)... 74.125.204.128, 64.233.187.128, 64.233.189.128, ...\n", + "Connecting to storage.googleapis.com (storage.googleapis.com)|74.125.204.128|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 28994253 (28M) [application/octet-stream]\n", + "Saving to: ‘efficientdet-d0.tar.gz’\n", + "\n", + "efficientdet-d0.tar 100%[===================>] 27.65M 31.1MB/s in 0.9s \n", + "\n", + "2021-02-15 08:03:44 (31.1 MB/s) - ‘efficientdet-d0.tar.gz’ saved [28994253/28994253]\n", + "\n", + "Use model in /content/automl/efficientdet/efficientdet-d0\n", + "--2021-02-15 08:03:44-- https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png\n", + "Resolving user-images.githubusercontent.com (user-images.githubusercontent.com)... 185.199.110.133, 185.199.109.133, 185.199.111.133, ...\n", + "Connecting to user-images.githubusercontent.com (user-images.githubusercontent.com)|185.199.110.133|:443... connected.\n", + "HTTP request sent, awaiting response... 
200 OK\n",
+            "Length: 4080549 (3.9M) [image/png]\n",
+            "Saving to: ‘img.png’\n",
+            "\n",
+            "img.png 100%[===================>] 3.89M --.-KB/s in 0.09s \n",
+            "\n",
+            "2021-02-15 08:03:45 (43.1 MB/s) - ‘img.png’ saved [4080549/4080549]\n",
+            "\n"
+          ],
+          "name": "stdout"
+        }
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "vXHI6qsAuQYy"
+      },
+      "source": [
+        "# 3. Evaluating COCO Dataset"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "M2t3l5Z9uZnG"
+      },
+      "source": [
+        "## 3.1 Downloading the COCO dataset and converting it into TFRecords"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "d7eSA5pNt2x2",
+        "outputId": "7ab11376-964b-4328-ba0b-fb2f9eb07a1f"
+      },
+      "source": [
+        "if 'val2017' not in os.listdir():\n",
+        "  !wget http://images.cocodataset.org/zips/val2017.zip\n",
+        "  !wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
+        "  !unzip -q val2017.zip\n",
+        "  !unzip annotations_trainval2017.zip\n",
+        "\n",
+        "  !mkdir tfrecord\n",
+        "  !PYTHONPATH=\".:$PYTHONPATH\" python dataset/create_coco_tfrecord.py \\\n",
+        "    --image_dir=val2017 \\\n",
+        "    --caption_annotations_file=annotations/captions_val2017.json \\\n",
+        "    --output_file_prefix=tfrecord/val \\\n",
+        "    --num_shards=32"
+      ],
+      "execution_count": 3,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "text": [
+            "--2021-02-15 08:06:47-- http://images.cocodataset.org/zips/val2017.zip\n",
+            "Resolving images.cocodataset.org (images.cocodataset.org)... 52.217.36.156\n",
+            "Connecting to images.cocodataset.org (images.cocodataset.org)|52.217.36.156|:80... connected.\n",
+            "HTTP request sent, awaiting response... 200 OK\n",
+            "Length: 815585330 (778M) [application/zip]\n",
+            "Saving to: ‘val2017.zip’\n",
+            "\n",
+            "val2017.zip 100%[===================>] 777.80M 16.9MB/s in 48s \n",
+            "\n",
+            "2021-02-15 08:07:35 (16.2 MB/s) - ‘val2017.zip’ saved [815585330/815585330]\n",
+            "\n",
+            "--2021-02-15 08:07:35-- http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
+            "Resolving images.cocodataset.org (images.cocodataset.org)... 52.216.132.139\n",
+            "Connecting to images.cocodataset.org (images.cocodataset.org)|52.216.132.139|:80... connected.\n",
+            "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 252907541 (241M) [application/zip]\n", + "Saving to: ‘annotations_trainval2017.zip’\n", + "\n", + "annotations_trainva 100%[===================>] 241.19M 16.8MB/s in 16s \n", + "\n", + "2021-02-15 08:07:52 (15.2 MB/s) - ‘annotations_trainval2017.zip’ saved [252907541/252907541]\n", + "\n", + "Archive: annotations_trainval2017.zip\n", + " inflating: annotations/instances_train2017.json \n", + " inflating: annotations/instances_val2017.json \n", + " inflating: annotations/captions_train2017.json \n", + " inflating: annotations/captions_val2017.json \n", + " inflating: annotations/person_keypoints_train2017.json \n", + " inflating: annotations/person_keypoints_val2017.json \n", + "2021-02-15 08:08:07.232728: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "I0215 08:08:09.284386 139876403943296 create_coco_tfrecord.py:285] writing to output path: tfrecord/val\n", + "I0215 08:08:09.619695 139876403943296 create_coco_tfrecord.py:237] Building caption index.\n", + "I0215 08:08:09.626409 139876403943296 create_coco_tfrecord.py:249] 0 images are missing captions.\n", + "I0215 08:08:09.776298 139876403943296 create_coco_tfrecord.py:323] On image 0 of 5000\n", + "I0215 08:08:09.940375 139876403943296 create_coco_tfrecord.py:323] On image 100 of 5000\n", + "I0215 08:08:10.086762 139876403943296 create_coco_tfrecord.py:323] On image 200 of 5000\n", + "I0215 08:08:10.225423 139876403943296 create_coco_tfrecord.py:323] On image 300 of 5000\n", + "I0215 08:08:10.376513 139876403943296 create_coco_tfrecord.py:323] On image 400 of 5000\n", + "I0215 08:08:10.512861 139876403943296 create_coco_tfrecord.py:323] On image 500 of 5000\n", + "I0215 08:08:10.660977 139876403943296 create_coco_tfrecord.py:323] On image 600 of 5000\n", + "I0215 08:08:10.791800 139876403943296 create_coco_tfrecord.py:323] On image 700 of 5000\n", + "I0215 08:08:11.023546 139876403943296 create_coco_tfrecord.py:323] On image 800 of 5000\n", + "I0215 08:08:11.165428 139876403943296 create_coco_tfrecord.py:323] On image 900 of 5000\n", + "I0215 08:08:11.319842 139876403943296 create_coco_tfrecord.py:323] On image 1000 of 5000\n", + "I0215 08:08:11.481098 139876403943296 create_coco_tfrecord.py:323] On image 1100 of 5000\n", + "I0215 08:08:11.632011 139876403943296 create_coco_tfrecord.py:323] On image 1200 of 5000\n", + "I0215 08:08:11.771724 139876403943296 create_coco_tfrecord.py:323] On image 1300 of 5000\n", + "I0215 08:08:11.906335 139876403943296 create_coco_tfrecord.py:323] On image 1400 of 5000\n", + "I0215 08:08:12.064010 139876403943296 create_coco_tfrecord.py:323] On image 1500 of 5000\n", + "I0215 08:08:12.211949 139876403943296 create_coco_tfrecord.py:323] On image 1600 of 5000\n", + "I0215 08:08:12.356715 139876403943296 create_coco_tfrecord.py:323] On image 1700 of 5000\n", + "I0215 08:08:12.626727 139876403943296 create_coco_tfrecord.py:323] On image 1800 of 5000\n", + "I0215 08:08:12.774333 139876403943296 create_coco_tfrecord.py:323] On image 1900 of 5000\n", + "I0215 08:08:12.932412 139876403943296 create_coco_tfrecord.py:323] On image 2000 of 5000\n", + "I0215 08:08:13.085657 139876403943296 create_coco_tfrecord.py:323] On image 2100 of 5000\n", + "I0215 08:08:13.247072 139876403943296 create_coco_tfrecord.py:323] On image 2200 of 5000\n", + "I0215 08:08:13.411663 139876403943296 create_coco_tfrecord.py:323] On image 2300 of 5000\n", + "I0215 08:08:13.570645 139876403943296 create_coco_tfrecord.py:323] On image 2400 of 
5000\n",
+            "I0215 08:08:13.728673 139876403943296 create_coco_tfrecord.py:323] On image 2500 of 5000\n",
+            "I0215 08:08:13.870792 139876403943296 create_coco_tfrecord.py:323] On image 2600 of 5000\n",
+            "I0215 08:08:14.177845 139876403943296 create_coco_tfrecord.py:323] On image 2700 of 5000\n",
+            "I0215 08:08:14.616946 139876403943296 create_coco_tfrecord.py:323] On image 2800 of 5000\n",
+            "I0215 08:08:15.110459 139876403943296 create_coco_tfrecord.py:323] On image 2900 of 5000\n",
+            "I0215 08:08:15.560891 139876403943296 create_coco_tfrecord.py:323] On image 3000 of 5000\n",
+            "I0215 08:08:15.966990 139876403943296 create_coco_tfrecord.py:323] On image 3100 of 5000\n",
+            "I0215 08:08:16.388808 139876403943296 create_coco_tfrecord.py:323] On image 3200 of 5000\n",
+            "I0215 08:08:16.826037 139876403943296 create_coco_tfrecord.py:323] On image 3300 of 5000\n",
+            "I0215 08:08:17.257006 139876403943296 create_coco_tfrecord.py:323] On image 3400 of 5000\n",
+            "I0215 08:08:17.665015 139876403943296 create_coco_tfrecord.py:323] On image 3500 of 5000\n",
+            "I0215 08:08:18.077958 139876403943296 create_coco_tfrecord.py:323] On image 3600 of 5000\n",
+            "I0215 08:08:18.566764 139876403943296 create_coco_tfrecord.py:323] On image 3700 of 5000\n",
+            "I0215 08:08:19.050907 139876403943296 create_coco_tfrecord.py:323] On image 3800 of 5000\n",
+            "I0215 08:08:22.784773 139876403943296 create_coco_tfrecord.py:323] On image 3900 of 5000\n",
+            "I0215 08:08:22.810469 139876403943296 create_coco_tfrecord.py:323] On image 4000 of 5000\n",
+            "I0215 08:08:22.833685 139876403943296 create_coco_tfrecord.py:323] On image 4100 of 5000\n",
+            "I0215 08:08:22.857089 139876403943296 create_coco_tfrecord.py:323] On image 4200 of 5000\n",
+            "I0215 08:08:22.879511 139876403943296 create_coco_tfrecord.py:323] On image 4300 of 5000\n",
+            "I0215 08:08:22.924930 139876403943296 create_coco_tfrecord.py:323] On image 4400 of 5000\n",
+            "I0215 08:08:23.059330 139876403943296 create_coco_tfrecord.py:323] On image 4500 of 5000\n",
+            "I0215 08:08:23.227055 139876403943296 create_coco_tfrecord.py:323] On image 4600 of 5000\n",
+            "I0215 08:08:23.450253 139876403943296 create_coco_tfrecord.py:323] On image 4700 of 5000\n",
+            "I0215 08:08:23.710522 139876403943296 create_coco_tfrecord.py:323] On image 4800 of 5000\n",
+            "I0215 08:08:24.064316 139876403943296 create_coco_tfrecord.py:323] On image 4900 of 5000\n",
+            "I0215 08:08:24.332943 139876403943296 create_coco_tfrecord.py:335] Finished writing, skipped 0 annotations.\n"
+          ],
+          "name": "stdout"
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "MIXuSSBqukXE",
+        "outputId": "f810e133-9f99-4ea6-b500-5f226c57a1f9"
+      },
+      "source": [
+        "# Evaluate on the COCO validation set (takes about 10 minutes for efficientdet-d0).\n",
+        "# The model (currently efficientdet-d0) can be changed in cell 2.\n",
+        "!python main.py --mode=eval \\\n",
+        "  --model_name={MODEL} --model_dir={ckpt_path} \\\n",
+        "  --val_file_pattern=tfrecord/val* \\\n",
+        "  --val_json_file=annotations/instances_val2017.json"
+      ],
+      "execution_count": 4,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "text": [
+            "2021-02-15 08:10:45.712174: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n",
+            "I0215 08:10:48.285151 140394613462912 main.py:264] {'name': 'efficientdet-d0', 'act_type': 'swish', 'image_size': (512, 512), 'target_size': None, 'input_rand_hflip': True, 'jitter_min': 0.1, 'jitter_max': 2.0, 
'autoaugment_policy': None, 'grid_mask': False, 'sample_image': None, 'map_freq': 5, 'num_classes': 90, 'seg_num_classes': 3, 'heads': ['object_detection'], 'skip_crowd_during_training': True, 'label_map': None, 'max_instances_per_image': 100, 'regenerate_source_id': False, 'min_level': 3, 'max_level': 7, 'num_scales': 3, 'aspect_ratios': [1.0, 2.0, 0.5], 'anchor_scale': 4.0, 'is_training_bn': True, 'momentum': 0.9, 'optimizer': 'sgd', 'learning_rate': 0.08, 'lr_warmup_init': 0.008, 'lr_warmup_epoch': 1.0, 'first_lr_drop_epoch': 200.0, 'second_lr_drop_epoch': 250.0, 'poly_lr_power': 0.9, 'clip_gradients_norm': 10.0, 'num_epochs': 300, 'data_format': 'channels_last', 'label_smoothing': 0.0, 'alpha': 0.25, 'gamma': 1.5, 'delta': 0.1, 'box_loss_weight': 50.0, 'iou_loss_type': None, 'iou_loss_weight': 1.0, 'weight_decay': 4e-05, 'strategy': None, 'mixed_precision': False, 'loss_scale': None, 'model_optimizations': {}, 'box_class_repeats': 3, 'fpn_cell_repeats': 3, 'fpn_num_filters': 64, 'separable_conv': True, 'apply_bn_for_resampling': True, 'conv_after_downsample': False, 'conv_bn_act_pattern': False, 'drop_remainder': True, 'nms_configs': {'method': 'gaussian', 'iou_thresh': None, 'score_thresh': 0.0, 'sigma': None, 'pyfunc': False, 'max_nms_inputs': 0, 'max_output_size': 100}, 'fpn_name': None, 'fpn_weight_method': None, 'fpn_config': None, 'survival_prob': None, 'img_summary_steps': None, 'lr_decay_method': 'cosine', 'moving_average_decay': 0.9998, 'ckpt_var_scope': None, 'skip_mismatch': True, 'backbone_name': 'efficientnet-b0', 'backbone_config': None, 'var_freeze_expr': None, 'use_keras_model': True, 'dataset_type': None, 'positives_momentum': None, 'grad_checkpoint': False, 'model_name': 'efficientdet-d0', 'iterations_per_loop': 100, 'model_dir': '/content/automl/efficientdet/efficientdet-d0', 'num_shards': 8, 'num_examples_per_epoch': 120000, 'backbone_ckpt': '', 'ckpt': None, 'val_json_file': 'annotations/instances_val2017.json', 'testdev_dir': None, 'profile': False, 'mode': 'eval'}\n", + "INFO:tensorflow:Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d0', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n", + ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n", + "I0215 08:10:48.395855 140394613462912 estimator.py:191] Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d0', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n", + ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, 
'_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n", + "INFO:tensorflow:Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d0', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n", + ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n", + "I0215 08:10:48.397253 140394613462912 estimator.py:191] Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d0', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n", + ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n", + "INFO:tensorflow:Waiting for new checkpoint at /content/automl/efficientdet/efficientdet-d0\n", + "I0215 08:10:48.397713 140394613462912 checkpoint_utils.py:139] Waiting for new checkpoint at /content/automl/efficientdet/efficientdet-d0\n", + "INFO:tensorflow:Found new checkpoint at /content/automl/efficientdet/efficientdet-d0/model\n", + "I0215 08:10:48.398653 140394613462912 checkpoint_utils.py:148] Found new checkpoint at /content/automl/efficientdet/efficientdet-d0/model\n", + "I0215 08:10:48.398843 140394613462912 main.py:344] Starting to evaluate.\n", + "2021-02-15 08:10:48.627714: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set\n", + "2021-02-15 08:10:48.629010: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n", + "2021-02-15 08:10:48.693651: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", + "2021-02-15 08:10:48.694437: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: \n", + "pciBusID: 0000:00:04.0 name: Tesla K80 computeCapability: 3.7\n", + "coreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n", + "2021-02-15 08:10:48.694492: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "2021-02-15 08:10:48.917975: I 
tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.10\n", + "2021-02-15 08:10:48.918099: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.10\n", + "2021-02-15 08:10:49.035688: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n", + "2021-02-15 08:10:49.075830: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n", + "2021-02-15 08:10:49.320592: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.10\n", + "2021-02-15 08:10:49.375899: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.10\n", + "2021-02-15 08:10:49.846333: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.7\n", + "2021-02-15 08:10:49.846547: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", + "2021-02-15 08:10:49.847750: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", + "2021-02-15 08:10:49.868450: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0\n", + "INFO:tensorflow:Calling model_fn.\n", + "I0215 08:10:50.559364 140394613462912 estimator.py:1162] Calling model_fn.\n", + "I0215 08:10:50.566413 140394613462912 efficientnet_builder.py:215] global_params= GlobalParams(batch_norm_momentum=0.99, batch_norm_epsilon=0.001, dropout_rate=0.2, data_format='channels_last', num_classes=1000, width_coefficient=1.0, depth_coefficient=1.0, depth_divisor=8, min_depth=None, survival_prob=0.0, relu_fn=functools.partial(, act_type='swish'), batch_norm=, use_se=True, local_pooling=None, condconv_num_experts=None, clip_projection_output=False, blocks_args=['r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25', 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25', 'r1_k3_s11_e6_i192_o320_se0.25'], fix_head_stem=None, grad_checkpoint=False)\n", + "I0215 08:10:50.895886 140394613462912 efficientdet_keras.py:749] fnode 0 : {'feat_level': 6, 'inputs_offsets': [3, 4]}\n", + "I0215 08:10:50.897055 140394613462912 efficientdet_keras.py:749] fnode 1 : {'feat_level': 5, 'inputs_offsets': [2, 5]}\n", + "I0215 08:10:50.898112 140394613462912 efficientdet_keras.py:749] fnode 2 : {'feat_level': 4, 'inputs_offsets': [1, 6]}\n", + "I0215 08:10:50.899170 140394613462912 efficientdet_keras.py:749] fnode 3 : {'feat_level': 3, 'inputs_offsets': [0, 7]}\n", + "I0215 08:10:50.900268 140394613462912 efficientdet_keras.py:749] fnode 4 : {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}\n", + "I0215 08:10:50.901242 140394613462912 efficientdet_keras.py:749] fnode 5 : {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}\n", + "I0215 08:10:50.902491 140394613462912 efficientdet_keras.py:749] fnode 6 : {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}\n", + "I0215 08:10:50.903546 140394613462912 efficientdet_keras.py:749] fnode 7 : {'feat_level': 7, 'inputs_offsets': [4, 11]}\n", + "I0215 08:10:50.905227 140394613462912 
efficientdet_keras.py:749] fnode 0 : {'feat_level': 6, 'inputs_offsets': [3, 4]}\n", + "I0215 08:10:50.906406 140394613462912 efficientdet_keras.py:749] fnode 1 : {'feat_level': 5, 'inputs_offsets': [2, 5]}\n", + "I0215 08:10:50.907436 140394613462912 efficientdet_keras.py:749] fnode 2 : {'feat_level': 4, 'inputs_offsets': [1, 6]}\n", + "I0215 08:10:50.908470 140394613462912 efficientdet_keras.py:749] fnode 3 : {'feat_level': 3, 'inputs_offsets': [0, 7]}\n", + "I0215 08:10:50.909642 140394613462912 efficientdet_keras.py:749] fnode 4 : {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}\n", + "I0215 08:10:50.910905 140394613462912 efficientdet_keras.py:749] fnode 5 : {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}\n", + "I0215 08:10:50.911958 140394613462912 efficientdet_keras.py:749] fnode 6 : {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}\n", + "I0215 08:10:50.912943 140394613462912 efficientdet_keras.py:749] fnode 7 : {'feat_level': 7, 'inputs_offsets': [4, 11]}\n", + "I0215 08:10:50.914578 140394613462912 efficientdet_keras.py:749] fnode 0 : {'feat_level': 6, 'inputs_offsets': [3, 4]}\n", + "I0215 08:10:50.915685 140394613462912 efficientdet_keras.py:749] fnode 1 : {'feat_level': 5, 'inputs_offsets': [2, 5]}\n", + "I0215 08:10:50.916701 140394613462912 efficientdet_keras.py:749] fnode 2 : {'feat_level': 4, 'inputs_offsets': [1, 6]}\n", + "I0215 08:10:50.917805 140394613462912 efficientdet_keras.py:749] fnode 3 : {'feat_level': 3, 'inputs_offsets': [0, 7]}\n", + "I0215 08:10:50.918872 140394613462912 efficientdet_keras.py:749] fnode 4 : {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}\n", + "I0215 08:10:50.919909 140394613462912 efficientdet_keras.py:749] fnode 5 : {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}\n", + "I0215 08:10:50.921087 140394613462912 efficientdet_keras.py:749] fnode 6 : {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}\n", + "I0215 08:10:50.922116 140394613462912 efficientdet_keras.py:749] fnode 7 : {'feat_level': 7, 'inputs_offsets': [4, 11]}\n", + "I0215 08:10:51.035507 140394613462912 efficientnet_model.py:735] Built stem stem : (1, 256, 256, 32)\n", + "I0215 08:10:51.036176 140394613462912 efficientnet_model.py:374] Block blocks_0 input shape: (1, 256, 256, 32)\n", + "I0215 08:10:51.064666 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 256, 256, 32)\n", + "I0215 08:10:51.093082 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 32)\n", + "I0215 08:10:51.119867 140394613462912 efficientnet_model.py:414] Project shape: (1, 256, 256, 16)\n", + "I0215 08:10:51.120517 140394613462912 efficientnet_model.py:374] Block blocks_1 input shape: (1, 256, 256, 16)\n", + "I0215 08:10:51.147840 140394613462912 efficientnet_model.py:390] Expand shape: (1, 256, 256, 96)\n", + "I0215 08:10:51.182270 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 128, 128, 96)\n", + "I0215 08:10:51.211130 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 96)\n", + "I0215 08:10:51.238853 140394613462912 efficientnet_model.py:414] Project shape: (1, 128, 128, 24)\n", + "I0215 08:10:51.239474 140394613462912 efficientnet_model.py:374] Block blocks_2 input shape: (1, 128, 128, 24)\n", + "I0215 08:10:51.268710 140394613462912 efficientnet_model.py:390] Expand shape: (1, 128, 128, 144)\n", + "I0215 08:10:51.298249 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 128, 128, 144)\n", + "I0215 08:10:51.328128 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 144)\n", + "I0215 08:10:51.356158 140394613462912 
efficientnet_model.py:414] Project shape: (1, 128, 128, 24)\n", + "I0215 08:10:51.356903 140394613462912 efficientnet_model.py:374] Block blocks_3 input shape: (1, 128, 128, 24)\n", + "I0215 08:10:51.385811 140394613462912 efficientnet_model.py:390] Expand shape: (1, 128, 128, 144)\n", + "I0215 08:10:51.414741 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 64, 64, 144)\n", + "I0215 08:10:51.444014 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 144)\n", + "I0215 08:10:51.473085 140394613462912 efficientnet_model.py:414] Project shape: (1, 64, 64, 40)\n", + "I0215 08:10:51.474085 140394613462912 efficientnet_model.py:374] Block blocks_4 input shape: (1, 64, 64, 40)\n", + "I0215 08:10:51.502791 140394613462912 efficientnet_model.py:390] Expand shape: (1, 64, 64, 240)\n", + "I0215 08:10:51.532240 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 64, 64, 240)\n", + "I0215 08:10:51.561150 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 240)\n", + "I0215 08:10:51.589206 140394613462912 efficientnet_model.py:414] Project shape: (1, 64, 64, 40)\n", + "I0215 08:10:51.589968 140394613462912 efficientnet_model.py:374] Block blocks_5 input shape: (1, 64, 64, 40)\n", + "I0215 08:10:51.623336 140394613462912 efficientnet_model.py:390] Expand shape: (1, 64, 64, 240)\n", + "I0215 08:10:51.654567 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 32, 32, 240)\n", + "I0215 08:10:51.685555 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 240)\n", + "I0215 08:10:51.713986 140394613462912 efficientnet_model.py:414] Project shape: (1, 32, 32, 80)\n", + "I0215 08:10:51.714636 140394613462912 efficientnet_model.py:374] Block blocks_6 input shape: (1, 32, 32, 80)\n", + "I0215 08:10:51.742813 140394613462912 efficientnet_model.py:390] Expand shape: (1, 32, 32, 480)\n", + "I0215 08:10:51.773987 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 32, 32, 480)\n", + "I0215 08:10:51.806489 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 480)\n", + "I0215 08:10:51.837665 140394613462912 efficientnet_model.py:414] Project shape: (1, 32, 32, 80)\n", + "I0215 08:10:51.838323 140394613462912 efficientnet_model.py:374] Block blocks_7 input shape: (1, 32, 32, 80)\n", + "I0215 08:10:51.871118 140394613462912 efficientnet_model.py:390] Expand shape: (1, 32, 32, 480)\n", + "I0215 08:10:51.901251 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 32, 32, 480)\n", + "I0215 08:10:51.930241 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 480)\n", + "I0215 08:10:51.957861 140394613462912 efficientnet_model.py:414] Project shape: (1, 32, 32, 80)\n", + "I0215 08:10:51.958518 140394613462912 efficientnet_model.py:374] Block blocks_8 input shape: (1, 32, 32, 80)\n", + "I0215 08:10:51.987040 140394613462912 efficientnet_model.py:390] Expand shape: (1, 32, 32, 480)\n", + "I0215 08:10:52.016878 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 32, 32, 480)\n", + "I0215 08:10:52.046546 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 480)\n", + "I0215 08:10:52.075405 140394613462912 efficientnet_model.py:414] Project shape: (1, 32, 32, 112)\n", + "I0215 08:10:52.076481 140394613462912 efficientnet_model.py:374] Block blocks_9 input shape: (1, 32, 32, 112)\n", + "I0215 08:10:52.107595 140394613462912 efficientnet_model.py:390] Expand shape: (1, 32, 32, 672)\n", + "I0215 08:10:52.137681 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 32, 32, 672)\n", + 
"I0215 08:10:52.171384 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 672)\n", + "I0215 08:10:52.199980 140394613462912 efficientnet_model.py:414] Project shape: (1, 32, 32, 112)\n", + "I0215 08:10:52.200653 140394613462912 efficientnet_model.py:374] Block blocks_10 input shape: (1, 32, 32, 112)\n", + "I0215 08:10:52.230742 140394613462912 efficientnet_model.py:390] Expand shape: (1, 32, 32, 672)\n", + "I0215 08:10:52.260191 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 32, 32, 672)\n", + "I0215 08:10:52.290162 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 672)\n", + "I0215 08:10:52.317845 140394613462912 efficientnet_model.py:414] Project shape: (1, 32, 32, 112)\n", + "I0215 08:10:52.318554 140394613462912 efficientnet_model.py:374] Block blocks_11 input shape: (1, 32, 32, 112)\n", + "I0215 08:10:52.347153 140394613462912 efficientnet_model.py:390] Expand shape: (1, 32, 32, 672)\n", + "I0215 08:10:52.378805 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 16, 16, 672)\n", + "I0215 08:10:52.409205 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 672)\n", + "I0215 08:10:52.436485 140394613462912 efficientnet_model.py:414] Project shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.437105 140394613462912 efficientnet_model.py:374] Block blocks_12 input shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.470740 140394613462912 efficientnet_model.py:390] Expand shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.506080 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.536857 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\n", + "I0215 08:10:52.564273 140394613462912 efficientnet_model.py:414] Project shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.565061 140394613462912 efficientnet_model.py:374] Block blocks_13 input shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.599982 140394613462912 efficientnet_model.py:390] Expand shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.634805 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.665519 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\n", + "I0215 08:10:52.696212 140394613462912 efficientnet_model.py:414] Project shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.696923 140394613462912 efficientnet_model.py:374] Block blocks_14 input shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.731099 140394613462912 efficientnet_model.py:390] Expand shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.765751 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.797348 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\n", + "I0215 08:10:52.830531 140394613462912 efficientnet_model.py:414] Project shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.831233 140394613462912 efficientnet_model.py:374] Block blocks_15 input shape: (1, 16, 16, 192)\n", + "I0215 08:10:52.865934 140394613462912 efficientnet_model.py:390] Expand shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.901147 140394613462912 efficientnet_model.py:393] DWConv shape: (1, 16, 16, 1152)\n", + "I0215 08:10:52.932100 140394613462912 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\n", + "I0215 08:10:52.959492 140394613462912 efficientnet_model.py:414] Project shape: (1, 16, 16, 320)\n", + "Traceback (most recent call last):\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py\", line 1748, in 
_init_from_args\n", + " gen_resource_variable_ops.var_is_initialized_op(handle))\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_resource_variable_ops.py\", line 1278, in var_is_initialized_op\n", + " \"VarIsInitializedOp\", resource=resource, name=name)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py\", line 750, in _apply_op_helper\n", + " attrs=attr_protos, op_def=op_def)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py\", line 3536, in _create_op_internal\n", + " op_def=op_def)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py\", line 2027, in __init__\n", + " output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)\n", + "KeyboardInterrupt\n", + "\n", + "During handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"main.py\", line 402, in \n", + " app.run(main)\n", + " File \"/usr/local/lib/python3.6/dist-packages/absl/app.py\", line 300, in run\n", + " _run_main(main, args)\n", + " File \"/usr/local/lib/python3.6/dist-packages/absl/app.py\", line 251, in _run_main\n", + " sys.exit(main(argv))\n", + " File \"main.py\", line 346, in main\n", + " eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow_estimator/python/estimator/estimator.py\", line 467, in evaluate\n", + " name=name)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow_estimator/python/estimator/estimator.py\", line 510, in _actual_eval\n", + " return _evaluate()\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow_estimator/python/estimator/estimator.py\", line 492, in _evaluate\n", + " self._evaluate_build_graph(input_fn, hooks, checkpoint_path))\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow_estimator/python/estimator/estimator.py\", line 1531, in _evaluate_build_graph\n", + " self._call_model_fn_eval(input_fn, self.config))\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow_estimator/python/estimator/estimator.py\", line 1567, in _call_model_fn_eval\n", + " config)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow_estimator/python/estimator/estimator.py\", line 1163, in _call_model_fn\n", + " model_fn_results = self._model_fn(features=features, **kwargs)\n", + " File \"/content/automl/efficientdet/det_model_fn.py\", line 618, in efficientdet_model_fn\n", + " variable_filter_fn=variable_filter_fn)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py\", line 620, in wrapper\n", + " return func(*args, **kwargs)\n", + " File \"/content/automl/efficientdet/det_model_fn.py\", line 344, in _model_fn\n", + " precision, model_fn, features)\n", + " File \"/content/automl/efficientdet/utils.py\", line 631, in build_model_with_precision\n", + " outputs = mm(ii, *args, **kwargs)\n", + " File \"/content/automl/efficientdet/det_model_fn.py\", line 333, in model_fn\n", + " cls_out_list, box_out_list = model(inputs, params['is_training_bn'])\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 786, in __call__\n", + " outputs = call_fn(cast_inputs, *args, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py\", line 620, in wrapper\n", + " return func(*args, **kwargs)\n", + " File 
\"/content/automl/efficientdet/keras/efficientdet_keras.py\", line 894, in call\n", + " fpn_feats = self.fpn_cells(feats, training)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 786, in __call__\n", + " outputs = call_fn(cast_inputs, *args, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py\", line 620, in wrapper\n", + " return func(*args, **kwargs)\n", + " File \"/content/automl/efficientdet/keras/efficientdet_keras.py\", line 720, in call\n", + " cell_feats = cell(feats, training)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 786, in __call__\n", + " outputs = call_fn(cast_inputs, *args, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py\", line 620, in wrapper\n", + " return func(*args, **kwargs)\n", + " File \"/content/automl/efficientdet/keras/efficientdet_keras.py\", line 773, in call\n", + " return _call(feats)\n", + " File \"/content/automl/efficientdet/keras/efficientdet_keras.py\", line 771, in _call\n", + " feats = fnode(feats, training)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 786, in __call__\n", + " outputs = call_fn(cast_inputs, *args, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py\", line 620, in wrapper\n", + " return func(*args, **kwargs)\n", + " File \"/content/automl/efficientdet/keras/efficientdet_keras.py\", line 184, in call\n", + " new_node = self.op_after_combine(new_node)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 786, in __call__\n", + " outputs = call_fn(cast_inputs, *args, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py\", line 620, in wrapper\n", + " return func(*args, **kwargs)\n", + " File \"/content/automl/efficientdet/keras/efficientdet_keras.py\", line 236, in call\n", + " new_node = self.bn(new_node, training=training)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 766, in __call__\n", + " self._maybe_build(inputs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 2106, in _maybe_build\n", + " self.build(input_shapes)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/normalization.py\", line 449, in build\n", + " experimental_autocast=False)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_v1.py\", line 457, in add_weight\n", + " caching_device=caching_device)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/base.py\", line 810, in _add_variable_with_custom_getter\n", + " **kwargs_for_getter)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer_utils.py\", line 142, in make_variable\n", + " shape=variable_shape if variable_shape else None)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py\", line 260, in __call__\n", + " return cls._variable_v1_call(*args, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py\", line 221, in _variable_v1_call\n", + " shape=shape)\n", + " File 
\"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py\", line 199, in \n", + " previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variable_scope.py\", line 2618, in default_variable_creator\n", + " shape=shape)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py\", line 264, in __call__\n", + " return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py\", line 1585, in __init__\n", + " distribute_strategy=distribute_strategy)\n", + " File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py\", line 1748, in _init_from_args\n", + " gen_resource_variable_ops.var_is_initialized_op(handle))\n", + "KeyboardInterrupt\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "u8FNQ6CvveZq" + }, + "source": [ + "" + ], + "execution_count": null, + "outputs": [] + } + ] +} diff --git a/README.md b/README.md index eb3b0b9d1..a45ed184a 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,6 @@ # Brain AutoML This repository contains a list of AutoML related models and libraries. + + +COCO_Evaluation.ipynb is the edited file for coco validation dataset evaluation