Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add several inference files. #1785

Open
wants to merge 30 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
6b169e7
Add inference files
iamstarlee May 28, 2024
ce85224
Add files via upload
iamstarlee May 29, 2024
285ce2e
Add files via upload
iamstarlee Jun 7, 2024
bdf3257
Add files via upload
iamstarlee Jun 11, 2024
84246b2
Update Inference_img_0529.py
iamstarlee Jun 11, 2024
3110c67
Update Inference_video_0529.py
iamstarlee Jun 11, 2024
a2841ba
Add files via upload
iamstarlee Jun 11, 2024
5602253
update 0611
iamstarlee Jun 11, 2024
437929b
update same file
iamstarlee Jun 11, 2024
d3462f5
update two files
iamstarlee Jun 11, 2024
39fcb7e
update one
iamstarlee Jun 11, 2024
0224e1c
update three files
iamstarlee Jun 11, 2024
5f15164
Copying is vital when cropping
iamstarlee Jun 12, 2024
558b316
add two files to crop specific patches
iamstarlee Jun 12, 2024
99e5c03
update two files
iamstarlee Jun 13, 2024
1ca65c6
update two files
iamstarlee Jun 14, 2024
6b9a9ed
add mkdir
iamstarlee Jun 14, 2024
5fd5bca
add total infer time
iamstarlee Jun 14, 2024
4727e8c
update 0615
iamstarlee Jun 15, 2024
9c2ab6a
change threshold
iamstarlee Jun 15, 2024
5ee959f
add floor level
iamstarlee Jun 16, 2024
6d853d7
update demo_utils.py
iamstarlee Jun 24, 2024
5bd87de
correct a comment
iamstarlee Jun 24, 2024
7f95c37
update demo.py
iamstarlee Jun 25, 2024
9a8690e
update export_onnx.py
iamstarlee Jun 25, 2024
dccfa9f
update these
iamstarlee Jun 26, 2024
3ff1f93
update model_utils.py
iamstarlee Jun 28, 2024
11f3cbd
update Ideas.ipynb
iamstarlee Jun 28, 2024
35e4282
update0705
iamstarlee Jul 5, 2024
e45a217
test run time
iamstarlee Jul 5, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
459 changes: 459 additions & 0 deletions Ideas.ipynb

Large diffs are not rendered by default.

154 changes: 154 additions & 0 deletions Inference.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,154 @@
import argparse
import os

import cv2
import numpy as np
import torch

from loguru import logger
import time
import onnxruntime
import torchvision.transforms as transforms

from yolox.data.data_augment import preproc as preprocess
from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
from PIL import Image


# Class labels the detector was trained on; tuple order must match the
# class indices emitted by the model.
CLASSES = (
    'people',
    'car',
)

def make_parser():
    """Build the CLI parser for the ONNX Runtime inference demo.

    Returns:
        argparse.ArgumentParser: parser exposing model path, run mode
        (image/video/webcam), input/output paths, webcam id, score
        threshold, input shape, and the p6 flag.
    """
    parser = argparse.ArgumentParser("onnxruntime inference sample")
    parser.add_argument(
        "--model",
        type=str,
        default="/home/whoami/Documents/Hanvon/yoloxs_0528.onnx",
        help="Input your onnx model.",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="video",
        help="mode type, eg. image, video and webcam.",
    )
    parser.add_argument(
        "--input_path",
        type=str,
        default='/home/whoami/Videos/20230101_005748_vflip.MP4',
        help="Path to your input image.",
    )
    parser.add_argument(
        "--camid",
        type=int,
        default=0,
        help="webcam demo camera id",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default='outputs_videos',
        help="Path to your output directory.",
    )
    parser.add_argument(
        "-s",
        "--score_thr",
        type=float,
        default=0.3,
        # Fixed user-facing typo: "threshould" -> "threshold".
        help="Score threshold to filter the result.",
    )
    parser.add_argument(
        "--input_shape",
        type=str,
        default="320,320",
        help="Specify an input shape for inference.",
    )
    parser.add_argument(
        "--with_p6",
        action="store_true",
        help="Whether your model uses p6 in FPN/PAN.",
    )
    return parser


# One InferenceSession per model path, built lazily on first use.
# Previously a new session was constructed on EVERY call, i.e. once per
# video frame, which dominates runtime.
_SESSION_CACHE = {}


def inference(args, origin_img):
    """Run ONNX detection on one BGR image and draw the results on it.

    Args:
        args: parsed CLI namespace (model, input_shape, with_p6, score_thr).
        origin_img: BGR image array as produced by cv2 — TODO confirm shape
            expectations against `preprocess`.

    Returns:
        The input image with detection boxes drawn (same array, annotated).
    """
    t0 = time.time()
    input_shape = tuple(map(int, args.input_shape.split(',')))

    img, ratio = preprocess(origin_img, input_shape)
    session = _SESSION_CACHE.get(args.model)
    if session is None:
        session = onnxruntime.InferenceSession(args.model)
        _SESSION_CACHE[args.model] = session

    ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
    output = session.run(None, ort_inputs)
    predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0]

    boxes = predictions[:, :4]
    # Per-class scores = objectness * class probabilities.
    scores = predictions[:, 4:5] * predictions[:, 5:]

    # Convert (cx, cy, w, h) -> (x1, y1, x2, y2) and undo the resize ratio.
    boxes_xyxy = np.ones_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2.
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2.
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
    boxes_xyxy /= ratio
    # Use the user-supplied threshold here: the previous hard-coded
    # score_thr=0.5 silently discarded detections whenever --score_thr
    # was set below 0.5, making the CLI option ineffective.
    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.5,
                          score_thr=args.score_thr)
    if dets is not None:
        final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
        origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
                         conf=args.score_thr, class_names=CLASSES)

    logger.info("Infer time: {:.4f}s".format(time.time() - t0))
    return origin_img

def image_process(args):
    """Detect objects in a single image file and save the annotated copy.

    Args:
        args: parsed CLI namespace; reads input_path and output_path.

    Raises:
        FileNotFoundError: if the input image cannot be read.
    """
    origin_img = cv2.imread(args.input_path)
    if origin_img is None:
        # cv2.imread returns None (no exception) on a missing or
        # unreadable file; fail loudly instead of crashing later
        # inside inference() with an opaque error.
        raise FileNotFoundError(f"Could not read image: {args.input_path}")
    origin_img = inference(args, origin_img)
    mkdir(args.output_path)
    # os.path.basename is separator-aware, unlike split("/")[-1].
    output_path = os.path.join(args.output_path, os.path.basename(args.input_path))
    logger.info("Saving detection result in {}".format(output_path))
    cv2.imwrite(output_path, origin_img)

def imageflow_demo(args):
    """Run detection over a video file or webcam stream and save the result.

    Args:
        args: parsed CLI namespace; reads mode, input_path, camid,
            output_path.

    Raises:
        RuntimeError: if the video source cannot be opened.
    """
    cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid)
    if not cap.isOpened():
        # Without this check the loop below would exit immediately and
        # silently write an empty file.
        raise RuntimeError("Could not open video source")
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
    fps = cap.get(cv2.CAP_PROP_FPS)

    mkdir(args.output_path)
    current_time = time.localtime()
    save_folder = os.path.join(
        args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
    )
    os.makedirs(save_folder, exist_ok=True)
    if args.mode == "video":
        save_path = os.path.join(save_folder, args.input_path.split("/")[-1])
    else:
        save_path = os.path.join(save_folder, "camera.mp4")

    logger.info(f"video save_path is {save_path}")
    vid_writer = cv2.VideoWriter(
        save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
    )
    try:
        while True:
            ret_val, frame = cap.read()
            if ret_val:
                result_frame = inference(args, frame)
                cv2.imshow('frame', result_frame)
                vid_writer.write(result_frame)
                ch = cv2.waitKey(1)
                if ch == 27 or ch == ord("q") or ch == ord("Q"):
                    break
            else:
                break
    finally:
        # Previously neither handle was released: an unfinalized
        # VideoWriter can leave the output mp4 truncated/corrupt, and
        # the capture device stayed held.
        cap.release()
        vid_writer.release()
        cv2.destroyAllWindows()

if __name__ == '__main__':
    args = make_parser().parse_args()
    if args.mode == "image":
        image_process(args)
    elif args.mode == "video" or args.mode == "webcam":
        imageflow_demo(args)
    else:
        # Previously an unrecognized --mode exited silently with no output.
        raise ValueError(
            f"Unknown mode '{args.mode}'; expected 'image', 'video' or 'webcam'."
        )



110 changes: 110 additions & 0 deletions Inference2c.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# [Loading a TorchScript Model in C++](https://pytorch.org/tutorials/advanced/cpp_export.html#loading-a-torchscript-model-in-c)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torchvision\n",
"\n",
"# An instance of your model.\n",
"model = torchvision.models.resnet18()\n",
"\n",
"# An example input you would normally provide to your model's forward() method.\n",
"example = torch.rand(1, 3, 224, 224)\n",
"\n",
"# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.\n",
"traced_script_module = torch.jit.trace(model, example)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([ 0.1552, 0.8421, -0.7267, -0.3862, 0.8198], grad_fn=<SliceBackward0>)"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"output = traced_script_module(torch.ones(1, 3, 224, 224))\n",
"output[0, :5]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"class MyModule(torch.nn.Module):\n",
" def __init__(self, N, M):\n",
" super(MyModule, self).__init__()\n",
" self.weight = torch.nn.Parameter(torch.rand(N, M))\n",
"\n",
" def forward(self, input):\n",
" if input.sum() > 0:\n",
" output = self.weight.mv(input)\n",
" else:\n",
" output = self.weight + input\n",
" return output\n",
"\n",
"my_module = MyModule(10,20)\n",
"sm = torch.jit.script(my_module)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"traced_script_module.save(\"traced_resnet_model.pt\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "pt39",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.19"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Loading