Commit a0413783 authored by Sikhin VC

initial commit

parent 523631c5
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
*.pyc
# C extensions
#*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
.idea/
stages:
- auto-tagging
- build
- deploy
- update
variables:
MYSQL_CONNECTION: "mysql -h $MYSQL_HOST -u $MYSQL_USER -p$MYSQL_PASS "
# STATUS_SCRIPT: /home/gitlab-runner/monitor/deployment-status.sh
# HELM_CHART: /home/gitlab-runner/kubernetes/ilens/$QA_ENV/ilens-modules
# VARIABLES_YML: variables.yml
# DEPLOYMENT_YML: metadata-services.yml
TIMEOUT: 960s
before_script:
- val=`echo $($MYSQL_CONNECTION -e "SELECT COUNT(*) FROM $VERSION_DB.$DB_TABLE WHERE category='customer-implementations' AND type='AI' AND os='docker' AND module_name='$CI_PROJECT_NAME' ") | cut -d " " -f2`
- if [ $val == 0 ]; then $MYSQL_CONNECTION -e "INSERT INTO $VERSION_DB.$DB_TABLE values('customer-implementations','AI','$CI_PROJECT_NAME','docker', '0', '0', '0', '0')";fi
- QA=$($MYSQL_CONNECTION -N -e "SELECT qa FROM $VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'AI' AND category = 'customer-implementations' AND os = 'docker'")
- DEV=$($MYSQL_CONNECTION -N -e "SELECT dev FROM $VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'AI' AND category = 'customer-implementations' AND os = 'docker'")
- UAT=$(mysql -h $MYSQL_HOST -u $MYSQL_USER -p$MYSQL_PASS -N -e "SELECT uat FROM $VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'AI' AND category = 'customer-implementations' AND os = 'docker'")
- PROD=$($MYSQL_CONNECTION -N -e "SELECT prod FROM $VERSION_DB.$DB_TABLE where module_name = '$CI_PROJECT_NAME' AND type = 'AI' AND category = 'customer-implementations' AND os = 'docker'")
auto-tagging:
stage: auto-tagging
before_script:
- val=`echo $($MYSQL_CONNECTION -e "SELECT COUNT(*) FROM $VERSION_DB.$VERSION_RELEASE_TABLE WHERE module_name='$CI_PROJECT_NAME' ") | cut -d " " -f2`
- if [ $val == 0 ]; then $MYSQL_CONNECTION -N -e "INSERT INTO $VERSION_DB.$VERSION_RELEASE_TABLE values('$CI_PROJECT_NAME', 'iLens', '0', '0', '0', '0')";fi
- ILENS=$($MYSQL_CONNECTION -N -e "SELECT ilens_version FROM "$VERSION_DB.$VERSION_RELEASE_TABLE" where module_name = '$CI_PROJECT_NAME'")
- RELEASE=$($MYSQL_CONNECTION -N -e "SELECT release_version FROM "$VERSION_DB.$VERSION_RELEASE_TABLE" where module_name = '$CI_PROJECT_NAME'")
- FEATURE=$($MYSQL_CONNECTION -N -e "SELECT feature_version FROM "$VERSION_DB.$VERSION_RELEASE_TABLE" where module_name = '$CI_PROJECT_NAME'")
- PATCH=$($MYSQL_CONNECTION -N -e "SELECT patch_version FROM "$VERSION_DB.$VERSION_RELEASE_TABLE" where module_name = '$CI_PROJECT_NAME'")
script:
- SOURCE_BRANCH=$(echo $CI_COMMIT_TITLE | cut -f 3 -d " " | cut -f 1 -d "/" | cut -f 2 -d "'")
- >
if [ "$SOURCE_BRANCH" = "QA" ]; then
((RELEASE=RELEASE+1)) && FEATURE=0 && PATCH=0;
TAG_NAME=v$RELEASE.$FEATURE
IMAGE_URL=azracrilensai.azurecr.io/repository/ilens-ai/customer-implementations/acc/$CI_PROJECT_NAME:"$TAG_NAME"
PROD=$RELEASE; QA=0; DEV=0;
$MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$DB_TABLE SET prod='$PROD' ,qa='$QA', dev='$DEV' WHERE module_name='$CI_PROJECT_NAME' AND type='AI' AND category='customer-implementations' AND os='docker'"
elif [ $SOURCE_BRANCH == "feature" ]; then
((FEATURE=FEATURE+1)) && PATCH=0;
TAG_NAME=v$RELEASE.$FEATURE
IMAGE_URL=azracrilensai.azurecr.io/repository/ilens-ai/customer-implementations/acc/$CI_PROJECT_NAME:"$TAG_NAME"
elif [ $SOURCE_BRANCH == "patch" ]; then
((PATCH=PATCH+1));
TAG_NAME=v$RELEASE.$FEATURE.$PATCH
IMAGE_URL=azracrilensai.azurecr.io/repository/ilens-ai/customer-implementations/acc/$CI_PROJECT_NAME:"$TAG_NAME"
else
exit 1
fi
- echo -e "\n\nImage:" $IMAGE_URL >> ReleaseNote.txt
- sed -i "1s|^|Version":" $TAG_NAME\n|" ReleaseNote.txt
- sed -i "1s|^|Module Name":" $CI_PROJECT_NAME\n|" ReleaseNote.txt
- docker build -t $IMAGE_URL .
- docker push $IMAGE_URL
- docker rmi --force $IMAGE_URL
- URL=$(echo $CI_PROJECT_URL | sed 's|https://||')
- git remote set-url origin https://$GIT_USRNAME:$GIT_USRPASSWD@$URL
- git config user.email "devopsilens@gmail.com"
- git config user.name "$GIT_USRNAME"
- git tag -a $TAG_NAME -F ReleaseNote.txt
- git push origin $TAG_NAME
- $MYSQL_CONNECTION -e "UPDATE $VERSION_DB.$VERSION_RELEASE_TABLE SET release_version='$RELEASE', feature_version='$FEATURE', patch_version='$PATCH' WHERE module_name = '$CI_PROJECT_NAME' "
- $MYSQL_CONNECTION -e "INSERT INTO $HISTORY_DB.$VERSION_RELEASE_TABLE values('$CI_JOB_ID', '$CI_PROJECT_NAME','iLens', '$ILENS.$RELEASE.$FEATURE', '$CI_COMMIT_SHA', '$GITLAB_USER_NAME', '$CI_COMMIT_REF_NAME')"
tags:
- shell
only:
- master
#~~~~~| CODE QUALITY |~~~~~#
codequality:
stage: deploy
image: azacrknowledgelens.azurecr.io/knowledgelens/klit-operation/devops/gitlab-runner:ubuntu-sonarscanner
script:
- /opt/sonar-scanner/bin/sonar-scanner -Dsonar.projectKey=$CI_PROJECT_NAME -Dsonar.projectName=$CI_PROJECT_NAME -Dsonar.typescript.node=./node/node -Dsonar.login=admin -Dsonar.password=admin -Dsonar.sources=.
- sleep 5
- python3 /opt/code_quality_report/static_code_quality_report_csv_v2.py $CI_PROJECT_NAME $GITLAB_USER_EMAIL,$EMAIL_TO $EMAIL_FROM $EMAIL_PASSWD False admin admin
only:
- develop
tags:
- docker
FROM nvcr.io/nvidia/l4t-pytorch:r32.5.0-pth1.7-py3
RUN apt-get update
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata ffmpeg libsm6 libxext6
RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install scipy==1.5.4
ADD . /app
WORKDIR /app
RUN python3 -m pip install -r requirements.txt
CMD ["python3","app.py"]
FROM bagcounting:v1
ADD . /app
WORKDIR /app
CMD ["python3","app.py"]
# engine_file_creation_and_counting
# jk_edge_code_api_integration
import os
os.environ["config"]="{\"DEPLOYMENT_ID\": \"pptestcam2_d7a0b6ec\", \"GET_JANUS_DETAILS\":\"http://192.168.3.181/camera_api/custom/get_janus?deploymentId=\", \"MONGO_URI\":\"http://192.168.3.181/camera_api/events/add_event\"}"
from edge_engine.edge_processor import ExecutePipeline
from edge_engine.edge_processor import Pubs
from edge_engine.model_optimizer import ModelOptimization
from scripts import CementBagCounter
from edge_engine.common.config import EDGE_CONFIG
if __name__ == '__main__':
model_optimization = ModelOptimization(num_class=1, weight_path="/app/data/best.wts", image_size=416)
model_optimization.change_configurations()
model_optimization.optimize_model()
pubs = Pubs()
mod = CementBagCounter(config=EDGE_CONFIG,
model_config=EDGE_CONFIG["modelConfig"],
pubs=pubs,
device_id=EDGE_CONFIG['deviceId'])
ex = ExecutePipeline(mod)
ex.run_model()
source /opt/intel/openvino/bin/setupvars.sh
python3 app.py
version: "3"
services:
auto-loader:
image: autoloader:v1.7
build:
context: .
dockerfile: Dockerfile
restart: unless-stopped
# environment:
# - config=${config}
# - .env
volumes:
- ./data_mnt:/home/svc-ilens/vid/micro-nvr/iLensEdgeCameraRecordings
- ./archive:/app/archive
from edge_engine.ai.model.modelwraper import ModelWrapper
from abc import ABC, abstractmethod
class ModelWrapper(ABC):
def __init__(self, path=None):
"""Implement code to load mask_model here"""
pass
def _pre_process(self, x):
"""Implement code to process raw input into format required for mask_model inference here"""
return x
def _post_process(self, x):
"""Implement any code to post-process mask_model inference response here"""
return x
@abstractmethod
def _predict(self, x):
"""Implement core mask_model inference code here"""
pass
def predict(self, x):
pre_x = self._pre_process(x)
prediction = self._predict(pre_x)
result = self._post_process(prediction)
return result
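To make the template-method contract above concrete, here is a minimal, hypothetical sketch of a ModelWrapper subclass (not part of the repo): it pretends that "inference" is thresholding a grayscale frame, purely to illustrate the _pre_process -> _predict -> _post_process flow that predict() drives.
```
# Hypothetical example only: a trivial ModelWrapper subclass.
import numpy as np
from edge_engine.ai.model.modelwraper import ModelWrapper

class ThresholdModel(ModelWrapper):
    def __init__(self, path=None, threshold=127):
        super().__init__(path)
        self.threshold = threshold

    def _pre_process(self, x):
        # collapse an HxWx3 frame to grayscale before "inference"
        return x.mean(axis=2) if x.ndim == 3 else x

    def _predict(self, x):
        # stand-in for real inference: binary mask of bright pixels
        return (x > self.threshold).astype(np.uint8)

    def _post_process(self, x):
        # report the foreground fraction instead of the raw mask
        return float(x.mean())

frame = np.random.randint(0, 256, (416, 416, 3), dtype=np.uint8)
print(ThresholdModel(threshold=100).predict(frame))
```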
# import the necessary packages
import cv2
import numpy as np
class GammaPreprocessor:
def __init__(self, gamma=1.0):
# creating Gamma table
self.invGamma = 1.0 / gamma
self.table = np.array([((i / 255.0) ** self.invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
def preprocess(self, image):
return cv2.LUT(image, self.table)
# import the necessary packages
from keras.preprocessing.image import img_to_array
class ImageToArrayPreprocessor:
def __init__(self, dataFormat=None):
# store the image data format
self.dataFormat = dataFormat
def preprocess(self, image):
# apply the Keras utility function that correctly rearranges
# the dimensions of the image
return img_to_array(image, data_format=self.dataFormat)
# import the necessary packages
import cv2
class SimpleHistogramPreprocessor:
def __init__(self):
pass
def preprocess(self, image):
# Run Histogram simple Equalization
return cv2.equalizeHist(image)
# import the necessary packages
import cv2
class SimplePreprocessor:
def __init__(self, width, height, inter=cv2.INTER_AREA):
# store the target image width, height, and interpolation
# method used when resizing
self.width = width
self.height = height
self.inter = inter
def preprocess(self, image):
# resize the image to a fixed size, ignoring the aspect
# ratio
return cv2.resize(image, (self.width, self.height),
interpolation=self.inter)
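All of these preprocessor classes expose the same preprocess(image) interface, so they can be chained in sequence. A small illustrative sketch, assuming the classes above are importable and that "sample.jpg" is any local BGR test image:
```
# Illustrative only: chain resize and gamma correction on a single frame.
import cv2

image = cv2.imread("sample.jpg")
for step in (SimplePreprocessor(416, 416), GammaPreprocessor(gamma=1.5)):
    image = step.preprocess(image)
print(image.shape)  # -> (416, 416, 3)
```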
import os
import sys
from dateutil import parser
from datetime import datetime
import json
import requests
def licence_validator(payload):
try:
dt = parser.parse(payload['valid_till'])
now = datetime.now()
if (now > dt):
sys.stdout.write("Licence Expired \n".format())
sys.stdout.flush()
return False
return True
except KeyError as e:
sys.stderr.write("Error loading licence")
return False
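A quick illustration of the payload shape licence_validator expects; the only field it reads is valid_till, and the dates below are made up:
```
# Illustrative payloads for licence_validator
expired = {"valid_till": "2020-01-01T00:00:00"}
active = {"valid_till": "2099-01-01T00:00:00"}
print(licence_validator(expired))  # False ("Licence Expired" is written to stdout)
print(licence_validator(active))   # True
print(licence_validator({}))       # False (missing key is treated as an invalid licence)
```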
LOG_LEVEL = os.environ.get("LOG_LEVEL", default="INFO").upper()
LOG_HANDLER_NAME = os.environ.get("LOG_HANDLER_NAME", default="ilens-edge_engine")
BASE_LOG_PATH = os.environ.get('BASE_LOG_PATH',
default=os.path.join(os.getcwd(), "logs".format()))
if not os.path.isdir(BASE_LOG_PATH):
os.mkdir(BASE_LOG_PATH)
CONFIG_ENV = json.loads(os.environ.get('config', default=None))
print(CONFIG_ENV)
DEPLOYMENT_ID = CONFIG_ENV.get('DEPLOYMENT_ID',None)
print(DEPLOYMENT_ID)
GET_JANUS_DETAILS = CONFIG_ENV.get('GET_JANUS_DETAILS',None)
MONGO_URI_ADD_EVENTLOGS = CONFIG_ENV.get('MONGO_URI',None)
url = GET_JANUS_DETAILS + DEPLOYMENT_ID
response = requests.get(url).json()
EDGE_CONFIG = response["data"]
LAST_COUNT = response["cement_bag_count"]
if not LAST_COUNT:
LAST_COUNT = 0
DEVICE_ID = EDGE_CONFIG["deviceId"]
DATA_PATH = EDGE_CONFIG["inputConf"].get('dataPath',os.path.join(os.getcwd(), "data".format()))
sys.stderr.write("Loading data from {} \n".format(DATA_PATH))
class LicenseModule:
private_key = "3139343831323738414d47454e3936363538373136"
encoding_algorithm = "HS256"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from edge_engine.common.config import LOG_LEVEL, LOG_HANDLER_NAME, BASE_LOG_PATH
import logging
from logging.handlers import RotatingFileHandler
from logging import WARNING,INFO,DEBUG,ERROR
import os
DEFAULT_FORMAT = '%(asctime)s %(levelname)5s %(name)s %(message)s'
DEBUG_FORMAT = '%(asctime)s %(levelname)5s %(name)s [%(threadName)5s:%(filename)5s:%(funcName)5s():%(lineno)s] %(message)s'
EXTRA = {}
FORMATTER = DEFAULT_FORMAT
if LOG_LEVEL.strip() == "DEBUG":
FORMATTER = DEBUG_FORMAT
def get_logger(log_handler_name, extra=EXTRA):
"""
Purpose: To create a logger.
:param log_handler_name: Name of the log handler.
:param extra: extra args for the logger
:return: logger object.
"""
log_path = os.path.join(BASE_LOG_PATH, log_handler_name + ".log")
logstash_temp = os.path.join(BASE_LOG_PATH, log_handler_name + ".db")
logger = logging.getLogger(log_handler_name)
logger.setLevel(LOG_LEVEL.strip().upper())
log_handler = logging.StreamHandler()
log_handler.setLevel(LOG_LEVEL)
formatter = logging.Formatter(FORMATTER)
log_handler.setFormatter(formatter)
handler = RotatingFileHandler(log_path, maxBytes=10485760,
backupCount=5)
handler.setFormatter(formatter)
logger.addHandler(log_handler)
logger.addHandler(handler)
logger = logging.LoggerAdapter(logger, extra)
return logger
logger = get_logger(LOG_HANDLER_NAME)
import os, time
from minio import Minio
from edge_engine.common.logsetup import logger
class MinioClient:
def __init__(self ,SECRET_KEY, ACCESS_KEY, BUCKET_NAME, LOCAL_DATA_PATH, MINIO_IP):
logger.info("Initalizing minioclient !!")
self.SECRET_KEY = SECRET_KEY
self.ACCESS_KEY = ACCESS_KEY
self.BUCKET_NAME = BUCKET_NAME
self.LOCAL_DATA_PATH = LOCAL_DATA_PATH
self.MINIO_IP = MINIO_IP
self.logfile = "./logs/videowrite.log"
self.minioClient = self.connect_to_minio()
self.create_bucket(self.BUCKET_NAME)
def connect_to_minio(self):
if self.SECRET_KEY is not None and self.ACCESS_KEY is not None:
logger.info("Connecting to Minio Service... !!! ")
minio_client = Minio(self.MINIO_IP, access_key = self.ACCESS_KEY, secret_key = self.SECRET_KEY,
region='us-east-1', secure=False)
return minio_client
else:
logger.info('Access Key and Secret Key String cannot be null')
raise Exception('Access Key and Secret Key String cannot be null')
def create_bucket(self, bucket_name):
try:
if bucket_name not in self.list_buckets():
logger.info("Creating bucket {}...".format(bucket_name))
self.minioClient.make_bucket(bucket_name, location="us-east-1")
else:
logger.info("Bucket already exists....")
except Exception as err:
logger.error(err)
def save_to_bucket(self, bucket_name, data_obj):
try:
with open(data_obj, 'rb') as file:
file_stat = os.stat(data_obj)
self.minioClient.put_object(bucket_name, data_obj.split(self.LOCAL_DATA_PATH)[1],
file, file_stat.st_size)
except Exception as err:
logger.error(err)
def list_buckets(self):
bucketobjects = self.minioClient.list_buckets()
bucketlist = []
for eachbucket in bucketobjects:
bucketlist.append(eachbucket.name)
return bucketlist
def read_write_logs(self):
try:
f = open(self.logfile)
except Exception as err:
print(err)
with open(self.logfile, "a") as startfile:
startfile.write("")
f = open(self.logfile)
return [line.split('\n')[0] for line in f]
def write_write_logs(self, log_str):
with open(self.logfile, "a") as my_file:
my_file.write(log_str + "\n")
def upload(self):
if self.LOCAL_DATA_PATH[-1]!='/':
self.LOCAL_DATA_PATH = self.LOCAL_DATA_PATH+"/"
while True:
listoffiles = [os.path.join(path, name) for path, subdirs, files in os.walk(self.LOCAL_DATA_PATH) for name in files]
listofwrittenfiles = self.read_write_logs()
listofnewfiles = list(set(listoffiles) - set(listofwrittenfiles))
for fileName in listofnewfiles:
try:
logger.info("Uploading {}..".format(fileName.split(self.LOCAL_DATA_PATH)[1]))
self.save_to_bucket(self.BUCKET_NAME, fileName)
self.write_write_logs(fileName)
except Exception as e:
logger.error(e)
time.sleep(5)
# if __name__=='__main__':
# SECRET_KEY = 'minioadmin'
# ACCESS_KEY = 'minioadmin'
# BUCKET_NAME = 'videobucket'
# MINIO_IP = '192.168.3.220:29000'
# LOCAL_DATA_PATH = "F:/GDrive Data/Downloads"
# obj = MinioClient(SECRET_KEY, ACCESS_KEY, BUCKET_NAME, LOCAL_DATA_PATH, MINIO_IP)
# obj.upload()
from edge_engine.common.logsetup import logger
from edge_engine.common.config import EDGE_CONFIG
from edge_engine.streamio.datastream import MQTT
from edge_engine.streamio.datastream import VideoOutputStream
from edge_engine.streamio.datastream import FrameOutputStream
from edge_engine.streamio.datastream import FFMPEGOutputStream
from edge_engine.streamio.datastream import MongoDataStreamOut
from edge_engine.streamio.videostream import ThreadedVideoStream
from edge_engine.streamio.videostream import FileVideoStream
from edge_engine.streamio.frameProcessor import FrameProcessor, FrameProcessorv2
from edge_engine.common.minio_server import MinioClient
import json
from threading import Thread
import time
import os
from edge_engine.streamio.videostream.filepathvideostream import FilePathVideoStream
class Pubs():
def __init__(self):
self.mqtt_pub = None
self.frame_write = None
self.video_write = None
self.mongo_write = None
self.rtp_write = None
self.build_pubs()
if 'minioConfig' in EDGE_CONFIG.keys() and \
isinstance(EDGE_CONFIG["minioConfig"],dict):
self.minio_thread = self.start_minio(EDGE_CONFIG["minioConfig"])
@staticmethod
def start_minio(minio_conf):
obj = MinioClient(minio_conf['secretKey'], minio_conf['accessKey'],
minio_conf['bucketName'], minio_conf['localDataPath'],
minio_conf['ip'])
t = Thread(target=obj.upload)
t.start()
return t
def build_pubs(self):
logger.info("building publishers ")
for conf in EDGE_CONFIG["pubConfigs"]:
if conf["type"].upper() == "MQTT":
self.mqtt_pub = MQTT(broker=conf["broker"], topic=conf["topic"], port=conf["port"]
, publish_hook=json.dumps)
elif conf["type"].upper() == "FRAMEWRITE":
self.frame_write = FrameOutputStream(
basepath=conf["basepath"],
iformat=conf["iformat"],
filenameFormat=conf["filenameFormat"],
publish_hook=None)
elif conf["type"].upper() == "VIDEOWRITE":
self.video_write = VideoOutputStream(basepath=conf["basepath"],
dims=conf["dims"],
filenameFormat=conf["filenameFormat"],
fps=conf["fps"], publish_hook=None)
elif conf["type"].upper() == "MONGO":
self.mongo_write = MongoDataStreamOut(host=conf["host"],
port=conf["port"],
dbname=conf["dbname"],
collection=conf["collection"],
keys=conf["keys"],
authsource=conf["authSource"],
username=conf["username"],
password=conf["password"],
publish_hook=None)
elif conf["type"].upper() == "RTP":
self.rtp_write = FFMPEGOutputStream(conf["ffmpegCmd"], conf["RTPEndpoint"]
, publish_hook=None)
else:
logger.error("Unsupported publisher {}".format(conf["type"]))
class ExecutePipeline:
def __init__(self, model):
self.model = model
def run_model(self):
if EDGE_CONFIG["inputConf"]["sourceType"].lower() in ["rtsp", "usbcam"]:
logger.info("Selected input stream as Direct cv input")
self.threadedVideoStream = ThreadedVideoStream(stream_config=EDGE_CONFIG["inputConf"])
self.threadedVideoStream.start()
self.frameProcessor = FrameProcessor(stream=self.threadedVideoStream,
model=self.model)
elif EDGE_CONFIG["inputConf"]["sourceType"].lower() == "videofile":
self.fileVideoStream = FileVideoStream(stream_config=EDGE_CONFIG["inputConf"])
self.fileVideoStream.start()
self.frameProcessor = FrameProcessor(stream=self.fileVideoStream, model=self.model)
elif EDGE_CONFIG["inputConf"]["sourceType"].lower() == "videopath":
processed_videos = []
videopath = EDGE_CONFIG["inputConf"].get('uri',"")
while True:
time1= time.time()
video_files = os.listdir(videopath)
if not video_files:
time.sleep(0.01)
continue
video_files.sort()
for video in video_files:
if video in processed_videos:
continue
if not video.endswith(".mp4"):
processed_videos.append(video)
continue
processed_videos.append(video)
EDGE_CONFIG["inputConf"]['uri'] = os.path.join(videopath, video)
self.fileVideoStream = FilePathVideoStream(EDGE_CONFIG["inputConf"]['uri'])
self.fileVideoStream.start()
self.frameProcessor = FrameProcessorv2(stream=self.fileVideoStream, model=self.model)
self.start_model()
logger.info("---------------")
logger.info(time.time()-time1)
# logger.info(int(time.time())-time2)
# logger.info(int(time.time())-time3)
logger.info("---------------")
else:
raise ValueError("unsupported source {}".format(EDGE_CONFIG["inputConf"]["sourceType"]))
self.start_model()
def start_model(self):
self.frameProcessor.run_model()
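Similarly, a sketch of the inputConf block that run_model() switches on; URIs and paths are placeholders:
```
# Hypothetical inputConf variants for ExecutePipeline.run_model()
rtsp_input = {"sourceType": "rtsp", "uri": "rtsp://192.0.2.20:554/stream1", "gStreamer": True}
file_input = {"sourceType": "videofile", "uri": "/app/data/sample.mp4"}
folder_input = {"sourceType": "videopath", "uri": "/app/data_mnt"}  # *.mp4 files are picked up as they appear
```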
import os
import subprocess
from loguru import logger
import shutil
class ModelOptimization:
def __init__(self, num_class, weight_path, image_size=416):
self.num_class = num_class
self.image_size = image_size
self.weight_path = weight_path
def change_configurations(self):
logger.info(f"Provided number of classes and image size are : {self.num_class} and {self.image_size}")
try:
with open('yolov5/yololayer.h', 'r') as file:
# read a list of lines into data
data = file.readlines()
data[19] = f" static constexpr int CLASS_NUM = {self.num_class};\n"
data[20] = f" static constexpr int INPUT_H = {self.image_size};\n"
data[21] = f" static constexpr int INPUT_W = {self.image_size};\n"
# and write everything back
with open('yolov5/yololayer.h', 'w') as file:
file.writelines(data)
logger.info("Successfully changed configurations")
except Exception as e:
logger.info(f"Failed to change configurations : {e}")
def optimize_model(self):
try:
current_directory = os.getcwd()
logger.info(f"Current directory is : {current_directory}")
build_path = os.path.join(current_directory, "yolov5", "build")
if os.path.isdir('yolov5/build'):
logger.info("build directory exists. Removing build directory!!")
shutil.rmtree('yolov5/build')
os.mkdir(build_path)
weight_name_with_extension = os.path.basename(self.weight_path)
weight_name, extension = os.path.splitext(weight_name_with_extension)
src = f"{current_directory}/yolov5/build/"
shutil.copy(self.weight_path, src)
logger.info(f"Created build folder")
os.chdir('yolov5/build')
logger.info("Running CMake command")
subprocess.run(['cmake', '..'])
logger.info("Running Make command")
subprocess.run(['make'])
logger.info("Optimizing model")
engine_name = weight_name + ".engine"
subprocess.run(["sudo", "./yolov5", "-s", weight_name_with_extension, engine_name, "c", "0.33", "0.50"])
engine_file_path = os.path.join(src, engine_name)
shutil.copy(engine_file_path, "/app/data/")
except Exception as e:
logger.info(f"Failed to optimized model : {e}")
from edge_engine.streamio.frameProcessor import FrameProcessor
from .datastreamprocessor import DataStreamProcessor
from edge_engine.streamio.datastream.datastreamwrapper import DataStreamWrapper
from edge_engine.streamio.datastream.mongodatastreamout import MongoDataStreamOut
from edge_engine.streamio.datastream.frameoutputstream import FrameOutputStream
from edge_engine.streamio.datastream.videooutputstream import VideoOutputStream
from edge_engine.streamio.datastream.mqttstream import MQTT
from edge_engine.streamio.datastream.ffmpegdata_streamout import FFMPEGOutputStream
from abc import ABC, abstractmethod
class DataStreamWrapper(ABC):
def __init__(self):
"""Implement code to load mask_model here"""
pass
def publish(self, x):
"""Implement code to publish"""
return x
def subscribe(self,hook):
"""Implement code to subscribe"""
return None
from edge_engine.streamio.datastream.datastreamwrapper import DataStreamWrapper
import subprocess as sp
class FFMPEGOutputStream(DataStreamWrapper):
def __init__(self, ffmpeg_cmd, rtp_endpoint, publish_hook=None):
super().__init__()
self.ffmpeg_cmd = ffmpeg_cmd
self.rtp_endpoint = rtp_endpoint
self.ffmpeg_cmd.append(self.rtp_endpoint[0])
self.proc = sp.Popen(self.ffmpeg_cmd, stdin=sp.PIPE, shell=False)
self.publish_hook = publish_hook
def publish(self, x):
if self.publish_hook is not None:
x = self.publish_hook(x)
frame = x["frame"]
self.proc.stdin.write(frame.tobytes())
self.proc.stdin.flush()
from edge_engine.streamio.datastream.datastreamwrapper import DataStreamWrapper
import cv2
import base64
import numpy as np
import os
from edge_engine.common.logsetup import logger
from datetime import datetime
class FrameOutputStream(DataStreamWrapper):
def __init__(self, basepath, iformat="jpg", filenameFormat="{deviceId}_{frameId}_{timestamp}", publish_hook=None):
super().__init__()
self.basepath = basepath
self.iformat = iformat
self.filenameFormat = filenameFormat
self.publish_hook = publish_hook
def publish(self, x):
if self.publish_hook is not None:
x= self.publish_hook(x)
frame = x["frame"]
frame = base64.b64decode(frame.split("data:image/jpeg;base64,")[1])
frame = np.frombuffer(frame, np.uint8)
frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
path = os.path.join(self.basepath, datetime.now().date().isoformat())
if not os.path.isdir(path):
logger.info("Creating {} \n".format(path))
os.mkdir(path)
cv2.imwrite("{path}.{iformat}".format(path=os.path.join(path, self.filenameFormat.format(**x)),
iformat=self.iformat), frame)
return True
def subscribe(self, hook):
super().subscribe(hook)
from edge_engine.streamio.datastream.datastreamwrapper import DataStreamWrapper
from pymongo import MongoClient
class MongoDataStreamOut(DataStreamWrapper):
def __init__(self, host, port, dbname, collection, keys, authsource,username=None,password=None, publish_hook=None):
super().__init__()
self.host = host
self.port = port
self.dbname = dbname
self.username = username
self.password = password
self.collection = collection
self.publish_hook = publish_hook
self.mongo = MongoClient(host=host,
port=int(port),username=self.username,password=self.password)
self.db = self.mongo[dbname]
self.keys = keys
self.authsource = authsource
def subscribe(self, hook=None):
pass
def publish(self, data):
if self.publish_hook is not None:
data = self.publish_hook(data)
fin_dat = {}
for k, v in data.items():
if k in self.keys:
fin_dat[k] = v
self.db[self.collection].insert_one(fin_dat)
import paho.mqtt.client as paho
from edge_engine.streamio.datastream.datastreamwrapper import DataStreamWrapper
from edge_engine.common.logsetup import logger
from uuid import uuid4
import traceback
class MQTT(DataStreamWrapper):
@staticmethod
def on_connect(client, userdata, flags, rc):
logger.info("Connection returned with result code:" + str(rc))
@staticmethod
def on_disconnect(client, userdata, rc):
logger.info("Disconnection returned result:" + str(rc))
@staticmethod
def on_subscribe(client, userdata, mid, granted_qos):
logger.debug("Subscribing MQTT {} {} {} {}".format(client, userdata, mid, granted_qos))
def on_message(self, client, userdata, msg):
logger.debug("Received message, topic:" + msg.topic + "payload:" + str(msg.payload))
if self.subscribe_hook is not None:
self.subscribe_hook(msg.payload.decode())
def __init__(self, broker, port, topic, qos=2, subscribe_hook=None, publish_hook=None):
super().__init__()
self.broker = broker
self.port = int(port)
self.topic = topic
self.client_name = "{}".format(uuid4())
self.client = paho.Client(self.client_name)
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_subscribe = self.on_subscribe
self.client.on_message = self.on_message
self.client.connect(host=self.broker, port=self.port)
self.subscribe_hook = subscribe_hook
self.publish_hook = publish_hook
self.qos = qos
def subscribe(self, hook=None):
if hook is not None:
self.subscribe_hook =hook
self.client.subscribe((self.topic, self.qos))
self.client.loop_forever()
def publish(self, data):
try:
if self.publish_hook is not None:
data = self.publish_hook(data)
self.client.publish(self.topic, data)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
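A minimal sketch of using the MQTT wrapper on its own; the broker address and topic are placeholders, and the constructor connects immediately, so a reachable broker is assumed:
```
# Illustrative only: publish one JSON payload through the MQTT wrapper.
import json

mqtt = MQTT(broker="192.0.2.10", port=1883, topic="ilens/events", publish_hook=json.dumps)
mqtt.publish({"deviceId": "cam-01", "count": 3})
# To consume instead, pass a callback; note that subscribe() calls loop_forever() and blocks:
# mqtt.subscribe(hook=lambda payload: print(payload))
```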
from edge_engine.streamio.datastream.datastreamwrapper import DataStreamWrapper
import cv2
import base64
import numpy as np
import os
from edge_engine.common.logsetup import logger
from datetime import datetime
class VideoOutputStream(DataStreamWrapper):
def __init__(self, basepath, dims, filenameFormat="{deviceId}_{timestamp}", fps=30, publish_hook=None):
super().__init__()
self.basepath = basepath
self.dims = (int(dims[0]),int(dims[1]))
self.fps = float(fps)
self.filenameFormat = filenameFormat
self.publish_hook = publish_hook
self.four_cc = cv2.VideoWriter_fourcc(*'mp4v')
self.out = None
def publish(self, x):
if self.publish_hook is not None:
x = self.publish_hook(x)
if len(x["metric"]) > 0:
if self.out is None:
path = os.path.join(self.basepath, datetime.now().date().isoformat())
if not os.path.isdir(path):
logger.info("Creating {} \n".format(path))
os.mkdir(path)
self.out = cv2.VideoWriter("{}.mp4".format(os.path.join(path, self.filenameFormat.format(**x))),
self.four_cc, self.fps, self.dims)
frame = x["frame"]
frame = base64.b64decode(frame.split("data:image/jpeg;base64,")[1])
frame = np.frombuffer(frame, np.uint8)
frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
self.out.write(frame)
else:
if self.out is not None:
self.out.release()
self.out = None
return True
def subscribe(self, hook):
super().subscribe(hook)
from edge_engine.common.logsetup import logger
class DataStreamProcessor:
def __init__(self, model, subscriber, publishers=None):
self.model = model
self.subscriber = subscriber
self.publishers = publishers if publishers is not None else []
logger.info("Setting up frame processor !!")
def processstream(self, msg):
print(msg)
def run_model(self):
self.subscriber.subscribe(hook=self.processstream)
from edge_engine.common.logsetup import logger
from edge_engine.common.config import DEVICE_ID
from uuid import uuid4
import traceback
class FrameProcessor:
def __init__(self, stream, model):
self.model = model
self.stream = stream
logger.info("Setting up frame processor !!")
def run_model(self):
while self.stream.stream.isOpened():
try:
logger.debug("Getting frame mask_model")
frame = self.stream.read()
logger.debug("Running mask_model")
data = {
"frame": frame,
"frameId": "{}".format(uuid4()),
"deviceId": "{}".format(DEVICE_ID),
}
self.model.predict(data)
logger.debug("publishing mask_model output")
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
class FrameProcessorv2:
def __init__(self, stream, model):
self.model = model
self.stream = stream
self.count = 0
self.skip_frame_every = 3 # 1 does not skip any frame (n-1 frames get skipped)
def run_model(self):
while self.stream.running():
try:
frame = self.stream.read()
self.count += 1
if frame is not None and self.count % self.skip_frame_every == 0:
fid = uuid4()
data = {
"frame": frame,
"frameId": "{}".format(fid),
"deviceId": "{}".format(DEVICE_ID),
}
self.model.predict(data)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
self.model.running = False
from edge_engine.streamio.videostream.fps import FPS
from edge_engine.streamio.videostream.nvgstreamer import NVGstreamer
from edge_engine.streamio.videostream.simplevideostream import SimpleVideoStream
from edge_engine.streamio.videostream.threadedvideostream import ThreadedVideoStream
from edge_engine.streamio.videostream.filevideostream import FileVideoStream
# import the necessary packages
from threading import Thread
import sys
import cv2
import time
import os
import shutil
import imutils
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue
def frame_transform(frame):
frame = imutils.resize(frame, width=416, height=416)
return frame
class FilePathVideoStream:
def __init__(self, path, transform=None, queue_size=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.path=path
while True:
self.stream = cv2.VideoCapture(path)
if not self.stream.isOpened():
time.sleep(0.01)
continue
break
self.stopped = False
self.transform = transform
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queue_size)
# initialize thread
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
def start(self):
# start a thread to read frames from the file video stream
self.thread.start()
return self
def update(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
break
# otherwise, ensure the queue has room in it
if not self.Q.full():
# read the next frame from the file
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stopped = True
# if there are transforms to be done, might as well
# do them on producer thread before handing back to
# consumer thread. ie. Usually the producer is so far
# ahead of consumer that we have time to spare.
#
# Python is not parallel but the transform operations
# are usually OpenCV native so release the GIL.
#
# Really just trying to avoid spinning up additional
# native threads and overheads of additional
# producer/consumer queues since this one was generally
# idle grabbing frames.
if self.transform:
frame = self.transform(frame)
# add the frame to the queue
self.Q.put(frame)
else:
time.sleep(0.1) # Rest for 100 ms, we have a full queue
self.stream.release()
def read(self):
# return next frame in the queue
return self.Q.get()
# Insufficient to have consumer use while(more()) which does
# not take into account if the producer has reached end of
# file stream.
def running(self):
# custom addition to this func
cond = self.more() or not self.stopped
if not cond:
self.move_file()
return cond
def more(self):
# return True if there are still frames in the queue. If stream is not stopped, try to wait a moment
tries = 0
while self.Q.qsize() == 0 and not self.stopped and tries < 5:
time.sleep(0.1)
tries += 1
return self.Q.qsize() > 0
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
# wait until stream resources are released (producer thread might be still grabbing frame)
self.thread.join()
def move_file(self):
src_path = self.path
dest_path = "archive"
root_path = os.path.dirname(os.path.abspath(__file__))
src = os.path.join(root_path, src_path)
if not os.path.exists(dest_path):
os.mkdir(dest_path)
shutil.move(src, dest_path)
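A short sketch of driving FilePathVideoStream directly, mirroring how FrameProcessorv2 consumes it; the path is a placeholder. Note that once the stream is exhausted, running() calls move_file() and archives the source video.
```
# Illustrative only: drain one video file through the threaded queue.
stream = FilePathVideoStream("/app/data_mnt/sample.mp4").start()
frames = 0
while stream.running():
    frame = stream.read()
    if frame is not None:
        frames += 1
print("frames read:", frames)
```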
# import the necessary packages
from threading import Thread
import sys
import cv2
import time
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue
class FileVideoStream:
def __init__(self,stream_config, transform=None):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.transform = transform
self.stream_config =stream_config
# initialize the queue used to store frames read from
# the video file
self.build_pipeline()
def start(self):
# start a thread to read frames from the file video stream
self.thread.start()
return self
def build_cv_obj(self):
self.stream = cv2.VideoCapture(self.stream_config["uri"])
self.stopped = False
def build_pipeline(self):
self.build_cv_obj()
if "queueSize" not in self.stream_config:
self.stream_config["queueSize"] =128
self.Q = Queue(maxsize=int(self.stream_config["queueSize"]))
# initialize thread
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
def is_opened(self):
return self.stream.isOpened()
def update(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
break
# otherwise, ensure the queue has room in it
if not self.Q.full():
# read the next frame from the file
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if grabbed is False or frame is None:
#self.stopped = True
self.build_cv_obj()
continue
# if there are transforms to be done, might as well
# do them on producer thread before handing back to
# consumer thread. ie. Usually the producer is so far
# ahead of consumer that we have time to spare.
#
# Python is not parallel but the transform operations
# are usually OpenCV native so release the GIL.
#
# Really just trying to avoid spinning up additional
# native threads and overheads of additional
# producer/consumer queues since this one was generally
# idle grabbing frames.
if self.transform:
frame = self.transform(frame)
# add the frame to the queue
self.Q.put(frame)
else:
time.sleep(0.1) # Rest for 100 ms, we have a full queue
self.stream.release()
def read(self):
# return next frame in the queue
return self.Q.get()
# Insufficient to have consumer use while(more()) which does
# not take into account if the producer has reached end of
# file stream.
def running(self):
return self.more() or not self.stopped
def more(self):
# return True if there are still frames in the queue. If stream is not stopped, try to wait a moment
tries = 0
while self.Q.qsize() == 0 and not self.stopped and tries < 5:
time.sleep(0.1)
tries += 1
return self.Q.qsize() > 0
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
# wait until stream resources are released (producer thread might be still grabbing frame)
self.thread.join()
# import the necessary packages
import datetime
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
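A small usage sketch for the FPS helper:
```
# Illustrative only: measure the throughput of a dummy per-frame loop.
import time

fps = FPS().start()
for _ in range(100):
    time.sleep(0.01)  # stand-in for per-frame work
    fps.update()
fps.stop()
print("elapsed: {:.2f}s, approx fps: {:.1f}".format(fps.elapsed(), fps.fps()))
```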
import cv2
class NVGstreamer:
def __init__(self, buildconfig):
self.width = 480
self.height = 640
self.latency = 0
self.framerate = "30/1"
self.fformat = "BGRx"
self.BUILD_CONFIG = {
"width": self.width,
"height": self.height,
"latency": self.latency,
"framerate": self.framerate,
"format": self.fformat,
"gstreamer": True
}
self.BUILD_CONFIG.update(buildconfig)
def open_cam_rtsp(self):
gst_str = ('rtspsrc location={uri} latency={latency} ! '
'rtph264depay ! h264parse ! omxh264dec ! '
'nvvidconv ! videorate ! '
'video/x-raw, width=(int){width}, height=(int){height}, '
'format=(string){format}, framerate=(fraction){framerate} ! '
'videoconvert ! appsink').format(**self.BUILD_CONFIG)
print(gst_str)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(self):
# We want to set width and height here, otherwise we could just do:
# return cv2.VideoCapture(dev)
gst_str = ('v4l2src device=/dev/video{uri} ! '
'video/x-raw, width=(int){width}, height=(int){height}, '
'format=(string){format}, framerate=(fraction){framerate} ! '
'videoconvert ! appsink').format(**self.BUILD_CONFIG)
print(gst_str)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(self):
# On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
gst_str = ('nvcamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)2592, height=(int)1458, '
'format=(string)I420 ! '
'nvvidconv ! videorate ! '
'video/x-raw, width=(int){width}, height=(int){height}, '
'format=(string){format}, framerate=(fraction){framerate} !'
'videoconvert ! appsink').format(**self.BUILD_CONFIG)
print(gst_str)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def custom_pipeline(self):
gst_str = "{customGstPipelineString}".format(**self.BUILD_CONFIG)
print(gst_str)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def build_pipeline(self):
if self.BUILD_CONFIG["gStreamer"]!=True:
if self.BUILD_CONFIG["sourceType"] == "usbcam":
self.cap = cv2.VideoCapture(int(self.BUILD_CONFIG["uri"]))
else:
self.cap = cv2.VideoCapture(self.BUILD_CONFIG["uri"])
elif self.BUILD_CONFIG["sourceType"] == "rtsp":
self.cap = self.open_cam_rtsp()
elif self.BUILD_CONFIG["sourceType"] == "usbcam":
self.cap = self.open_cam_usb()
elif self.BUILD_CONFIG["sourceType"] == "onboard":
self.cap = self.open_cam_onboard()
elif self.BUILD_CONFIG["sourceType"] == "customPipeline":
self.cap = self.custom_pipeline()
else:
raise ValueError("unimplemented source {}".format(self.BUILD_CONFIG["sourceType"]))
def get_stream(self):
return self.cap
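A sketch of opening an RTSP source through NVGstreamer; the URI is a placeholder, and the hardware-decode pipeline above assumes a Jetson-style GStreamer/OpenCV build:
```
# Illustrative only: build the RTSP pipeline and grab a single frame.
config = {
    "sourceType": "rtsp",
    "uri": "rtsp://192.0.2.20:554/stream1",
    "gStreamer": True,
    "width": 416,
    "height": 416,
}
gst = NVGstreamer(config)
gst.build_pipeline()
cap = gst.get_stream()
grabbed, frame = cap.read()
```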
from edge_engine.common.logsetup import logger
from edge_engine.streamio.videostream import NVGstreamer
class SimpleVideoStream:
def __init__(self, stream_config, name="SimpleVideoStream"):
self.stream_config = stream_config
self.build_pipeline()
(self.grabbed, self.frame) = self.stream.read()
self.name = name
def build_pipeline(self):
self.gstreamer = NVGstreamer(self.stream_config)
self.gstreamer.build_pipeline()
self.stream = self.gstreamer.get_stream()
def start(self):
logger.info("Starting video stream ")
if self.stream.isOpened():
self.grabbed, self.frame = self.stream.read()
if self.grabbed is False:
logger.error("Empty Frame !!!! ")
logger.error("Error opening Capture !!!! ")
self.build_pipeline()
return self
else:
logger.error("Error opening Capture !!!! ")
self.build_pipeline()
def is_opened(self):
return self.stream.isOpened()
def read(self):
# return the frame most recently read
if self.stream.isOpened():
self.grabbed, self.frame = self.stream.read()
if self.grabbed is False:
logger.error("Empty Frame !!!! ")
raise ValueError("Empty Frame !!!! ")
return self.frame
else:
logger.error("Error opening Capture !!!! ")
raise ValueError("Error opening Capture !!!! ")
def stop(self):
if self.stream.isOpened():
self.stream.release()
# import the necessary packages
from threading import Thread
import time
from edge_engine.streamio.videostream import NVGstreamer
from edge_engine.common.logsetup import logger
class ThreadedVideoStream:
def __init__(self, stream_config, name="ThreadedVideoStream"):
# initialize the video camera stream and read the first frame
# from the stream
self.stream_config = stream_config
self.build_pipeline()
# self.stream = stream
(self.grabbed, self.frame) = self.stream.read()
# initialize the thread name
self.name = name
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def build_pipeline(self):
self.gstreamer = NVGstreamer(self.stream_config)
self.gstreamer.build_pipeline()
self.stream = self.gstreamer.get_stream()
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
if self.grabbed is False or self.frame is None:
logger.error("Empty Frame !!!! ")
logger.error("Error opening Capture !!!! ")
self.build_pipeline()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
time.sleep(0.2)
self.stream.release()
# Copyright 2019 KnowledgeLens pvt Ltd.
VERSION = '0.0.1.alpha'
cmake_minimum_required(VERSION 2.6)
project(yolov5)
add_definitions(-std=c++11)
add_definitions(-DAPI_EXPORTS)
option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)
find_package(CUDA REQUIRED)
if(WIN32)
enable_language(CUDA)
endif(WIN32)
include_directories(${PROJECT_SOURCE_DIR}/include)
# include and link dirs of cuda and tensorrt, you need adapt them if yours are different
# cuda
include_directories(/usr/local/cuda/include)
link_directories(/usr/local/cuda/lib64)
# tensorrt
include_directories(/usr/include/x86_64-linux-gnu/)
link_directories(/usr/lib/x86_64-linux-gnu/)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -g -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED")
cuda_add_library(myplugins SHARED yololayer.cu)
target_link_libraries(myplugins nvinfer cudart)
find_package(OpenCV)
include_directories(${OpenCV_INCLUDE_DIRS})
cuda_add_executable(yolov5 calibrator.cpp yolov5.cpp preprocess.cu)
target_link_libraries(yolov5 nvinfer)
target_link_libraries(yolov5 cudart)
target_link_libraries(yolov5 myplugins)
target_link_libraries(yolov5 ${OpenCV_LIBS})
if(UNIX)
add_definitions(-O2 -pthread)
endif(UNIX)
# yolov5
The Pytorch implementation is [ultralytics/yolov5](https://github.com/ultralytics/yolov5).
## Different versions of yolov5
Currently, we support yolov5 v1.0, v2.0, v3.0, v3.1, v4.0, v5.0 and v6.0.
- For yolov5 v6.0, download .pt from [yolov5 release v6.0](https://github.com/ultralytics/yolov5/releases/tag/v6.0), `git clone -b v6.0 https://github.com/ultralytics/yolov5.git` and `git clone https://github.com/wang-xinyu/tensorrtx.git`, then follow the How to Run steps on this page.
- For yolov5 v5.0, download .pt from [yolov5 release v5.0](https://github.com/ultralytics/yolov5/releases/tag/v5.0), `git clone -b v5.0 https://github.com/ultralytics/yolov5.git` and `git clone -b yolov5-v5.0 https://github.com/wang-xinyu/tensorrtx.git`, then follow how-to-run in [tensorrtx/yolov5-v5.0](https://github.com/wang-xinyu/tensorrtx/tree/yolov5-v5.0/yolov5).
- For yolov5 v4.0, download .pt from [yolov5 release v4.0](https://github.com/ultralytics/yolov5/releases/tag/v4.0), `git clone -b v4.0 https://github.com/ultralytics/yolov5.git` and `git clone -b yolov5-v4.0 https://github.com/wang-xinyu/tensorrtx.git`, then follow how-to-run in [tensorrtx/yolov5-v4.0](https://github.com/wang-xinyu/tensorrtx/tree/yolov5-v4.0/yolov5).
- For yolov5 v3.1, download .pt from [yolov5 release v3.1](https://github.com/ultralytics/yolov5/releases/tag/v3.1), `git clone -b v3.1 https://github.com/ultralytics/yolov5.git` and `git clone -b yolov5-v3.1 https://github.com/wang-xinyu/tensorrtx.git`, then follow how-to-run in [tensorrtx/yolov5-v3.1](https://github.com/wang-xinyu/tensorrtx/tree/yolov5-v3.1/yolov5).
- For yolov5 v3.0, download .pt from [yolov5 release v3.0](https://github.com/ultralytics/yolov5/releases/tag/v3.0), `git clone -b v3.0 https://github.com/ultralytics/yolov5.git` and `git clone -b yolov5-v3.0 https://github.com/wang-xinyu/tensorrtx.git`, then follow how-to-run in [tensorrtx/yolov5-v3.0](https://github.com/wang-xinyu/tensorrtx/tree/yolov5-v3.0/yolov5).
- For yolov5 v2.0, download .pt from [yolov5 release v2.0](https://github.com/ultralytics/yolov5/releases/tag/v2.0), `git clone -b v2.0 https://github.com/ultralytics/yolov5.git` and `git clone -b yolov5-v2.0 https://github.com/wang-xinyu/tensorrtx.git`, then follow how-to-run in [tensorrtx/yolov5-v2.0](https://github.com/wang-xinyu/tensorrtx/tree/yolov5-v2.0/yolov5).
- For yolov5 v1.0, download .pt from [yolov5 release v1.0](https://github.com/ultralytics/yolov5/releases/tag/v1.0), `git clone -b v1.0 https://github.com/ultralytics/yolov5.git` and `git clone -b yolov5-v1.0 https://github.com/wang-xinyu/tensorrtx.git`, then follow how-to-run in [tensorrtx/yolov5-v1.0](https://github.com/wang-xinyu/tensorrtx/tree/yolov5-v1.0/yolov5).
## Config
- Choose the model n/s/m/l/x/n6/s6/m6/l6/x6 from command line arguments.
- Input shape defined in yololayer.h
- Number of classes defined in yololayer.h, **DO NOT FORGET TO ADAPT THIS if you are using your own model**
- INT8/FP16/FP32 can be selected by the macro in yolov5.cpp, **INT8 needs more steps; please follow `How to Run` first and then go to the `INT8 Quantization` section below**
- GPU id can be selected by the macro in yolov5.cpp
- NMS thresh in yolov5.cpp
- BBox confidence thresh in yolov5.cpp
- Batch size in yolov5.cpp
## How to Run, yolov5s as example
1. generate .wts from pytorch with .pt, or download .wts from model zoo
```
// clone code according to above #Different versions of yolov5
// download https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5s.pt
cp {tensorrtx}/yolov5/gen_wts.py {ultralytics}/yolov5
cd {ultralytics}/yolov5
python gen_wts.py -w yolov5s.pt -o yolov5s.wts
// a file 'yolov5s.wts' will be generated.
```
2. build tensorrtx/yolov5 and run
```
cd {tensorrtx}/yolov5/
// update CLASS_NUM in yololayer.h if your model is trained on custom dataset
mkdir build
cd build
cp {ultralytics}/yolov5/yolov5s.wts {tensorrtx}/yolov5/build
cmake ..
make
sudo ./yolov5 -s [.wts] [.engine] [n/s/m/l/x/n6/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file
sudo ./yolov5 -d [.engine] [image folder] // deserialize and run inference, the images in [image folder] will be processed.
// For example yolov5s
sudo ./yolov5 -s yolov5s.wts yolov5s.engine s
sudo ./yolov5 -d yolov5s.engine ../samples
// For example Custom model with depth_multiple=0.17, width_multiple=0.25 in yolov5.yaml
sudo ./yolov5 -s yolov5_custom.wts yolov5.engine c 0.17 0.25
sudo ./yolov5 -d yolov5.engine ../samples
```
3. check the generated images, e.g. _zidane.jpg and _bus.jpg
4. (optional) load and run the TensorRT model in Python
```
// install python-tensorrt, pycuda, etc.
// ensure the yolov5s.engine and libmyplugins.so have been built
python yolov5_trt.py
// Another version of python script, which is using CUDA Python instead of pycuda.
python yolov5_trt_cuda_python.py
```
## INT8 Quantization
1. Prepare calibration images; you can randomly select about 1,000 images from your training set. For COCO, you can also download the calibration images `coco_calib` from [GoogleDrive](https://drive.google.com/drive/folders/1s7jE9DtOngZMzJC1uL307J2MiaGwdRSI?usp=sharing) or [BaiduPan](https://pan.baidu.com/s/1GOm_-JobpyLMAqZWCDUhKg) pwd: a9wh
2. unzip it in yolov5/build
3. set the macro `USE_INT8` in yolov5.cpp and make
4. serialize the model and test
<p align="center">
<img src="https://user-images.githubusercontent.com/15235574/78247927-4d9fac00-751e-11ea-8b1b-704a0aeb3fcf.jpg">
</p>
<p align="center">
<img src="https://user-images.githubusercontent.com/15235574/78247970-60b27c00-751e-11ea-88df-41473fed4823.jpg">
</p>
## More Information
See the readme in [home page.](https://github.com/wang-xinyu/tensorrtx)
#include <iostream>
#include <iterator>
#include <fstream>
#include <opencv2/dnn/dnn.hpp>
#include "calibrator.h"
#include "cuda_utils.h"
#include "utils.h"
Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache)
: batchsize_(batchsize)
, input_w_(input_w)
, input_h_(input_h)
, img_idx_(0)
, img_dir_(img_dir)
, calib_table_name_(calib_table_name)
, input_blob_name_(input_blob_name)
, read_cache_(read_cache)
{
input_count_ = 3 * input_w * input_h * batchsize;
CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float)));
read_files_in_dir(img_dir, img_files_);
}
Int8EntropyCalibrator2::~Int8EntropyCalibrator2()
{
CUDA_CHECK(cudaFree(device_input_));
}
int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT
{
return batchsize_;
}
bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT
{
if (img_idx_ + batchsize_ > (int)img_files_.size()) {
return false;
}
std::vector<cv::Mat> input_imgs_;
for (int i = img_idx_; i < img_idx_ + batchsize_; i++) {
std::cout << img_files_[i] << " " << i << std::endl;
cv::Mat temp = cv::imread(img_dir_ + img_files_[i]);
if (temp.empty()){
std::cerr << "Fatal error: image cannot open!" << std::endl;
return false;
}
cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_);
input_imgs_.push_back(pr_img);
}
img_idx_ += batchsize_;
cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false);
CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr<float>(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice));
assert(!strcmp(names[0], input_blob_name_));
bindings[0] = device_input_;
return true;
}
const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT
{
std::cout << "reading calib cache: " << calib_table_name_ << std::endl;
calib_cache_.clear();
std::ifstream input(calib_table_name_, std::ios::binary);
input >> std::noskipws;
if (read_cache_ && input.good())
{
std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), std::back_inserter(calib_cache_));
}
length = calib_cache_.size();
return length ? calib_cache_.data() : nullptr;
}
void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT
{
std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl;
std::ofstream output(calib_table_name_, std::ios::binary);
output.write(reinterpret_cast<const char*>(cache), length);
}
#ifndef ENTROPY_CALIBRATOR_H
#define ENTROPY_CALIBRATOR_H
#include <NvInfer.h>
#include <string>
#include <vector>
#include "macros.h"
//! \class Int8EntropyCalibrator2
//!
//! \brief Implements Entropy calibrator 2.
//! CalibrationAlgoType is kENTROPY_CALIBRATION_2.
//!
class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2
{
public:
Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
virtual ~Int8EntropyCalibrator2();
int getBatchSize() const TRT_NOEXCEPT override;
bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
private:
int batchsize_;
int input_w_;
int input_h_;
int img_idx_;
std::string img_dir_;
std::vector<std::string> img_files_;
size_t input_count_;
std::string calib_table_name_;
const char* input_blob_name_;
bool read_cache_;
void* device_input_;
std::vector<char> calib_cache_;
};
#endif // ENTROPY_CALIBRATOR_H
#ifndef TRTX_CUDA_UTILS_H_
#define TRTX_CUDA_UTILS_H_
#include <cuda_runtime_api.h>
#ifndef CUDA_CHECK
#define CUDA_CHECK(callstr)\
{\
cudaError_t error_code = callstr;\
if (error_code != cudaSuccess) {\
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
assert(0);\
}\
}
#endif // CUDA_CHECK
#endif // TRTX_CUDA_UTILS_H_
import sys
import argparse
import os
import struct
import torch
from utils.torch_utils import select_device
def parse_args():
parser = argparse.ArgumentParser(description='Convert .pt file to .wts')
parser.add_argument('-w', '--weights', required=True, help='Input weights (.pt) file path (required)')
parser.add_argument('-o', '--output', help='Output (.wts) file path (optional)')
args = parser.parse_args()
if not os.path.isfile(args.weights):
raise SystemExit('Invalid input file')
if not args.output:
args.output = os.path.splitext(args.weights)[0] + '.wts'
elif os.path.isdir(args.output):
args.output = os.path.join(
args.output,
os.path.splitext(os.path.basename(args.weights))[0] + '.wts')
return args.weights, args.output
pt_file, wts_file = parse_args()
# Initialize
device = select_device('cpu')
# Load model
model = torch.load(pt_file, map_location=device) # load to FP32
model = model['ema' if model.get('ema') else 'model'].float()
# update anchor_grid info
anchor_grid = model.model[-1].anchors * model.model[-1].stride[...,None,None]
# model.model[-1].anchor_grid = anchor_grid
delattr(model.model[-1], 'anchor_grid') # model.model[-1] is detect layer
model.model[-1].register_buffer("anchor_grid", anchor_grid)  # the parameters are saved in the state dict via "register_buffer" and therefore end up in the exported weights
model.to(device).eval()
with open(wts_file, 'w') as f:
f.write('{}\n'.format(len(model.state_dict().keys())))
for k, v in model.state_dict().items():
vr = v.reshape(-1).cpu().numpy()
f.write('{} {} '.format(k, len(vr)))
for vv in vr:
f.write(' ')
f.write(struct.pack('>f' ,float(vv)).hex())
f.write('\n')
#ifndef __MACROS_H
#define __MACROS_H
#ifdef API_EXPORTS
#if defined(_MSC_VER)
#define API __declspec(dllexport)
#else
#define API __attribute__((visibility("default")))
#endif
#else
#if defined(_MSC_VER)
#define API __declspec(dllimport)
#else
#define API
#endif
#endif // API_EXPORTS
#if NV_TENSORRT_MAJOR >= 8
#define TRT_NOEXCEPT noexcept
#define TRT_CONST_ENQUEUE const
#else
#define TRT_NOEXCEPT
#define TRT_CONST_ENQUEUE
#endif
#endif // __MACROS_H
#include "preprocess.h"
#include <opencv2/opencv.hpp>
__global__ void warpaffine_kernel(
uint8_t* src, int src_line_size, int src_width,
int src_height, float* dst, int dst_width,
int dst_height, uint8_t const_value_st,
AffineMatrix d2s, int edge) {
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
float m_x1 = d2s.value[0];
float m_y1 = d2s.value[1];
float m_z1 = d2s.value[2];
float m_x2 = d2s.value[3];
float m_y2 = d2s.value[4];
float m_z2 = d2s.value[5];
int dx = position % dst_width;
int dy = position / dst_width;
float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
float c0, c1, c2;
if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
// out of range
c0 = const_value_st;
c1 = const_value_st;
c2 = const_value_st;
} else {
int y_low = floorf(src_y);
int x_low = floorf(src_x);
int y_high = y_low + 1;
int x_high = x_low + 1;
uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
float ly = src_y - y_low;
float lx = src_x - x_low;
float hy = 1 - ly;
float hx = 1 - lx;
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
uint8_t* v1 = const_value;
uint8_t* v2 = const_value;
uint8_t* v3 = const_value;
uint8_t* v4 = const_value;
if (y_low >= 0) {
if (x_low >= 0)
v1 = src + y_low * src_line_size + x_low * 3;
if (x_high < src_width)
v2 = src + y_low * src_line_size + x_high * 3;
}
if (y_high < src_height) {
if (x_low >= 0)
v3 = src + y_high * src_line_size + x_low * 3;
if (x_high < src_width)
v4 = src + y_high * src_line_size + x_high * 3;
}
c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
}
//bgr to rgb
float t = c2;
c2 = c0;
c0 = t;
//normalization
c0 = c0 / 255.0f;
c1 = c1 / 255.0f;
c2 = c2 / 255.0f;
//rgbrgbrgb to rrrgggbbb
int area = dst_width * dst_height;
float* pdst_c0 = dst + dy * dst_width + dx;
float* pdst_c1 = pdst_c0 + area;
float* pdst_c2 = pdst_c1 + area;
*pdst_c0 = c0;
*pdst_c1 = c1;
*pdst_c2 = c2;
}
void preprocess_kernel_img(
uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream) {
AffineMatrix s2d,d2s;
float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);
s2d.value[0] = scale;
s2d.value[1] = 0;
s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;
s2d.value[3] = 0;
s2d.value[4] = scale;
s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;
cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
int jobs = dst_height * dst_width;
int threads = 256;
int blocks = ceil(jobs / (float)threads);
warpaffine_kernel<<<blocks, threads, 0, stream>>>(
src, src_width*3, src_width,
src_height, dst, dst_width,
dst_height, 128, d2s, jobs);
}
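The host wrapper above builds the source-to-destination letterbox transform s2d (uniform scale plus centring), inverts it into d2s with cv::invertAffineTransform, and hands d2s to the kernel so each destination pixel can be mapped back into the source image for bilinear sampling. A small NumPy/OpenCV sketch of the same matrix construction (illustration only, not part of the build) that can be used to sanity-check the mapping on the CPU:

import cv2
import numpy as np

def letterbox_affine(src_w, src_h, dst_w, dst_h):
    # Same construction as the CUDA host code: scale and centre the source
    # into the destination, then invert to obtain the dst -> src mapping.
    scale = min(dst_h / src_h, dst_w / src_w)
    s2d = np.array([[scale, 0.0, -scale * src_w * 0.5 + dst_w * 0.5],
                    [0.0, scale, -scale * src_h * 0.5 + dst_h * 0.5]], dtype=np.float32)
    d2s = cv2.invertAffineTransform(s2d)
    return s2d, d2s

s2d, d2s = letterbox_affine(1920, 1080, 640, 640)
cx, cy = d2s @ np.array([320.0, 320.0, 1.0])
print(cx, cy)  # the destination centre maps back to the source centre (~960, ~540)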
#ifndef __PREPROCESS_H
#define __PREPROCESS_H
#include <cuda_runtime.h>
#include <cstdint>
struct AffineMatrix{
float value[6];
};
void preprocess_kernel_img(uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
#endif // __PREPROCESS_H
../yolov3-spp/samples/
\ No newline at end of file
#ifndef TRTX_YOLOV5_UTILS_H_
#define TRTX_YOLOV5_UTILS_H_
#include <dirent.h>
#include <opencv2/opencv.hpp>
static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
int w, h, x, y;
float r_w = input_w / (img.cols*1.0);
float r_h = input_h / (img.rows*1.0);
if (r_h > r_w) {
w = input_w;
h = r_w * img.rows;
x = 0;
y = (input_h - h) / 2;
} else {
w = r_h * img.cols;
h = input_h;
x = (input_w - w) / 2;
y = 0;
}
cv::Mat re(h, w, CV_8UC3);
cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR);
cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
re.copyTo(out(cv::Rect(x, y, re.cols, re.rows)));
return out;
}
static inline int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
DIR *p_dir = opendir(p_dir_name);
if (p_dir == nullptr) {
return -1;
}
struct dirent* p_file = nullptr;
while ((p_file = readdir(p_dir)) != nullptr) {
if (strcmp(p_file->d_name, ".") != 0 &&
strcmp(p_file->d_name, "..") != 0) {
//std::string cur_file_name(p_dir_name);
//cur_file_name += "/";
//cur_file_name += p_file->d_name;
std::string cur_file_name(p_file->d_name);
file_names.push_back(cur_file_name);
}
}
closedir(p_dir);
return 0;
}
#endif // TRTX_YOLOV5_UTILS_H_
#ifndef _YOLO_LAYER_H
#define _YOLO_LAYER_H
#include <vector>
#include <string>
#include <NvInfer.h>
#include "macros.h"
namespace Yolo
{
static constexpr int CHECK_COUNT = 3;
static constexpr float IGNORE_THRESH = 0.1f;
struct YoloKernel
{
int width;
int height;
float anchors[CHECK_COUNT * 2];
};
static constexpr int MAX_OUTPUT_BBOX_COUNT = 1000;
static constexpr int CLASS_NUM = 80;
static constexpr int INPUT_H = 640; // yolov5's input height and width must be divisible by 32.
static constexpr int INPUT_W = 640;
static constexpr int LOCATIONS = 4;
struct alignas(float) Detection {
//center_x center_y w h
float bbox[LOCATIONS];
float conf; // bbox_conf * cls_conf
float class_id;
};
}
namespace nvinfer1
{
class API YoloLayerPlugin : public IPluginV2IOExt
{
public:
YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel);
YoloLayerPlugin(const void* data, size_t length);
~YoloLayerPlugin();
int getNbOutputs() const TRT_NOEXCEPT override
{
return 1;
}
Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override;
int initialize() TRT_NOEXCEPT override;
virtual void terminate() TRT_NOEXCEPT override {};
virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; }
virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override;
virtual size_t getSerializationSize() const TRT_NOEXCEPT override;
virtual void serialize(void* buffer) const TRT_NOEXCEPT override;
bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override {
return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
}
const char* getPluginType() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
void destroy() TRT_NOEXCEPT override;
IPluginV2IOExt* clone() const TRT_NOEXCEPT override;
void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override;
const char* getPluginNamespace() const TRT_NOEXCEPT override;
DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override;
bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override;
bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override;
void attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override;
void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override;
void detachFromContext() TRT_NOEXCEPT override;
private:
void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1);
int mThreadCount = 256;
const char* mPluginNamespace;
int mKernelCount;
int mClassCount;
int mYoloV5NetWidth;
int mYoloV5NetHeight;
int mMaxOutObject;
std::vector<Yolo::YoloKernel> mYoloKernel;
void** mAnchor;
};
class API YoloPluginCreator : public IPluginCreator
{
public:
YoloPluginCreator();
~YoloPluginCreator() override = default;
const char* getPluginName() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override;
IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override;
IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override;
void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override
{
mNamespace = libNamespace;
}
const char* getPluginNamespace() const TRT_NOEXCEPT override
{
return mNamespace.c_str();
}
private:
std::string mNamespace;
static PluginFieldCollection mFC;
static std::vector<PluginField> mPluginAttributes;
};
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
};
#endif // _YOLO_LAYER_H
#ifndef _YOLO_LAYER_H
#define _YOLO_LAYER_H
#include <vector>
#include <string>
#include <NvInfer.h>
#include "macros.h"
namespace Yolo
{
static constexpr int CHECK_COUNT = 3;
static constexpr float IGNORE_THRESH = 0.1f;
struct YoloKernel
{
int width;
int height;
float anchors[CHECK_COUNT * 2];
};
static constexpr int MAX_OUTPUT_BBOX_COUNT = 1000;
static constexpr int CLASS_NUM = 2;
static constexpr int INPUT_H = 244;
static constexpr int INPUT_W = 244;
static constexpr int LOCATIONS = 4;
struct alignas(float) Detection {
//center_x center_y w h
float bbox[LOCATIONS];
float conf; // bbox_conf * cls_conf
float class_id;
};
}
namespace nvinfer1
{
class API YoloLayerPlugin : public IPluginV2IOExt
{
public:
YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel);
YoloLayerPlugin(const void* data, size_t length);
~YoloLayerPlugin();
int getNbOutputs() const TRT_NOEXCEPT override
{
return 1;
}
Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override;
int initialize() TRT_NOEXCEPT override;
virtual void terminate() TRT_NOEXCEPT override {};
virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; }
virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override;
virtual size_t getSerializationSize() const TRT_NOEXCEPT override;
virtual void serialize(void* buffer) const TRT_NOEXCEPT override;
bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override {
return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
}
const char* getPluginType() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
void destroy() TRT_NOEXCEPT override;
IPluginV2IOExt* clone() const TRT_NOEXCEPT override;
void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override;
const char* getPluginNamespace() const TRT_NOEXCEPT override;
DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override;
bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override;
bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override;
void attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override;
void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override;
void detachFromContext() TRT_NOEXCEPT override;
private:
void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1);
int mThreadCount = 256;
const char* mPluginNamespace;
int mKernelCount;
int mClassCount;
int mYoloV5NetWidth;
int mYoloV5NetHeight;
int mMaxOutObject;
std::vector<Yolo::YoloKernel> mYoloKernel;
void** mAnchor;
};
class API YoloPluginCreator : public IPluginCreator
{
public:
YoloPluginCreator();
~YoloPluginCreator() override = default;
const char* getPluginName() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override;
IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override;
IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override;
void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override
{
mNamespace = libNamespace;
}
const char* getPluginNamespace() const TRT_NOEXCEPT override
{
return mNamespace.c_str();
}
private:
std::string mNamespace;
static PluginFieldCollection mFC;
static std::vector<PluginField> mPluginAttributes;
};
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
};
#endif // _YOLO_LAYER_H
opencv-python==4.5.5.62
pycuda==2020.1
numpy==1.19.4
requests>=2.23.0
expiringdict==1.2.1
minio==7.1.3
cachetools==4.2.4
pymongo==4.0.1
Cython==0.29.21
paho-mqtt==1.5.0
scikit-learn==0.22.2.post1
python-dateutil==2.8.2
imutils==0.5.4
\ No newline at end of file
from .cement_counter import CementBagCounter
\ No newline at end of file
import os
import sys
import json
MAIN_OS_VARIABLE = os.environ.get('config')
if MAIN_OS_VARIABLE is None:
sys.stderr.write("Configuration not found...\n")
sys.stderr.write("Exiting....\n")
sys.exit(1)
MAIN_OS_VARIABLE = json.loads(MAIN_OS_VARIABLE)
MONGO_URI = MAIN_OS_VARIABLE.get('MONGO_URI')
MONGO_SERVICE_DB = MAIN_OS_VARIABLE.get('MONGO_DB')
MONGO_SERVICE_COLL = MAIN_OS_VARIABLE.get('MONGO_COLL')
PASS_KEY = MAIN_OS_VARIABLE.get('PASS_KEY')
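The module expects its whole configuration as a JSON document in the config environment variable, so the launcher has to populate that variable before this code is imported. A minimal sketch of what that might look like (the values are placeholders, not real endpoints or credentials):

import json
import os

os.environ['config'] = json.dumps({
    'MONGO_URI': 'mongodb://localhost:27017',   # placeholder
    'MONGO_DB': 'example_db',                   # placeholder
    'MONGO_COLL': 'example_collection',         # placeholder
    'PASS_KEY': 'replace-me',                   # placeholder
})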
class JanusDeploymentConstants:
JANUS_DEPLOYMENT_COLLECTION = "janusDeploymentConfigurations"
DEPLOYMENT_ID = 'deploymentId'
EXTRA_FIELDS_KEY = 'extra_fields'
LINE_COORDINATES = ['x1', 'y1', 'x2', 'y2']
COUNT_BAGS_FLAG = 'count_bags'
ALIGNMENT_KEY = 'alignment'
VERTICAL = 'vertical'
HORIZONTAL = 'horizontal'
MODEL_KEY = 'model'
DIRECTION_KEY = 'direction'
MRP_DETECT_KEY = 'mrp_detect'
class CameraConstants:
videortpmap="VP8/90000"
videopt=96
threads=3
gStreamer = False
eventType = 'deploy'
created_by = 'user_6501'
event_status = 'pending'
deploymentTypeCreate = 'upgrade_and_deploy'
deploymentTypeRemove = 'remove'
pipeline_internal = {}
pipeline_category = "ai"
thread = 1
job_id = "pipeline_129"
deployment_key = 'deploymentId'
pipeline_deployment_type = 'docker'
docker_deployment_type = 'single'
command_eventtype = 'command'
command_type = 'docker'
restart_command = "restart_container"
stop_command = "stop_container"
start_command = "start_container"
from edge_engine.ai.model.modelwraper import ModelWrapper
import cv2
import base64
class LoopBackModel(ModelWrapper):
def __init__(self, pubs, path=None):
super().__init__(path)
self.mqtt = pubs.mqtt_pub
def _pre_process(self, x):
return x
def _post_process(self, x):
image = cv2.imencode('.jpg', x['frame'])[1].tobytes()
image = 'data:image/jpeg;base64,' + base64.b64encode(image).decode("utf-8")
x['frame'] = image
self.mqtt.publish(x)
return x
def _predict(self, x):
return x
def predict(self, x):
return super().predict(x)
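The loop-back model passes frames through unchanged and, in post-processing, JPEG-encodes each frame into a base64 data URI before publishing it over MQTT. The encoding step in isolation, as a self-contained sketch that does not require edge_engine:

import base64

import cv2
import numpy as np

frame = np.zeros((64, 64, 3), dtype=np.uint8)  # stand-in frame for illustration
ok, buf = cv2.imencode('.jpg', frame)          # JPEG-encode the BGR frame
assert ok
data_uri = 'data:image/jpeg;base64,' + base64.b64encode(buf.tobytes()).decode('utf-8')
print(data_uri[:40], '...')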
# import the necessary packages
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
from edge_engine.common.logsetup import logger
class CentroidTracker():
def __init__(self, maxDisappeared=50):
# initialize the next unique object ID along with two ordered
# dictionaries used to keep track of mapping a given object
# ID to its centroid and number of consecutive frames it has
# been marked as "disappeared", respectively
self.nextObjectID = 0
self.objects = OrderedDict()
self.disappeared = OrderedDict()
# store the number of maximum consecutive frames a given
# object is allowed to be marked as "disappeared" until we
# need to deregister the object from tracking
self.maxDisappeared = maxDisappeared
def register(self, centroid):
# when registering an object we use the next available object
# ID to store the centroid
self.objects[self.nextObjectID] = {'has_print': False, 'centroid': centroid}
self.disappeared[self.nextObjectID] = 0
self.nextObjectID += 1
def deregister(self, objectID):
# to deregister an object ID we delete the object ID from
# both of our respective dictionaries
del self.objects[objectID]
del self.disappeared[objectID]
def update(self, rects):
# check to see if the list of input bounding box rectangles
# is empty
if len(rects) == 0:
# loop over any existing tracked objects and mark them
# as disappeared
for objectID in list(self.disappeared.keys()):
self.disappeared[objectID] += 1
# if we have reached a maximum number of consecutive
# frames where a given object has been marked as
# missing, deregister it
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
# return early as there are no centroids or tracking info
# to update
return self.objects
# initialize an array of input centroids for the current frame
inputCentroids = np.zeros((len(rects), 2), dtype="int")
# loop over the bounding box rectangles
for (i, (startX, startY, endX, endY)) in enumerate(rects):
# use the bounding box coordinates to derive the centroid
cX = int((startX + endX) / 2.0)
cY = int((startY + endY) / 2.0)
inputCentroids[i] = (cX, cY)
# if we are currently not tracking any objects take the input
# centroids and register each of them
if len(self.objects) == 0:
for i in range(0, len(inputCentroids)):
self.register(inputCentroids[i])
# otherwise, we are currently tracking objects so we need to
# try to match the input centroids to existing object
# centroids
else:
# grab the set of object IDs and corresponding centroids
objectIDs = list(self.objects.keys())
objectCentroids = [e['centroid'] for e in self.objects.values()]
# compute the distance between each pair of object
# centroids and input centroids, respectively -- our
# goal will be to match an input centroid to an existing
# object centroid
# logger.info(f"OBC --> {objectCentroids}")
try:
D = dist.cdist(np.array(objectCentroids), inputCentroids)
except Exception as e:
logger.info(f"objectCentroids --> {objectCentroids}")
logger.info(f"inputCentroids --> {inputCentroids}")
logger.exception(e)
raise
# in order to perform this matching we must (1) find the
# smallest value in each row and then (2) sort the row
# indexes based on their minimum values so that the row
# with the smallest value is at the *front* of the index
# list
rows = D.min(axis=1).argsort()
# next, we perform a similar process on the columns by
# finding the smallest value in each column and then
# sorting using the previously computed row index list
cols = D.argmin(axis=1)[rows]
# in order to determine if we need to update, register,
# or deregister an object we need to keep track of which
# of the rows and column indexes we have already examined
usedRows = set()
usedCols = set()
# loop over the combination of the (row, column) index
# tuples
for (row, col) in zip(rows, cols):
# if we have already examined either the row or
# column value before, ignore it
if row in usedRows or col in usedCols:
continue
# otherwise, grab the object ID for the current row,
# set its new centroid, and reset the disappeared
# counter
objectID = objectIDs[row]
self.objects[objectID]['centroid'] = inputCentroids[col]
self.disappeared[objectID] = 0
# indicate that we have examined each of the row and
# column indexes, respectively
usedRows.add(row)
usedCols.add(col)
# compute both the row and column index we have NOT yet
# examined
unusedRows = set(range(0, D.shape[0])).difference(usedRows)
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
# in the event that the number of object centroids is
# equal or greater than the number of input centroids
# we need to check and see if some of these objects have
# potentially disappeared
if D.shape[0] >= D.shape[1]:
# loop over the unused row indexes
for row in unusedRows:
# grab the object ID for the corresponding row
# index and increment the disappeared counter
objectID = objectIDs[row]
self.disappeared[objectID] += 1
# check to see if the number of consecutive
# frames the object has been marked "disappeared"
# for warrants deregistering the object
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
# otherwise, if the number of input centroids is greater
# than the number of existing object centroids we need to
# register each new input centroid as a trackable object
else:
for col in unusedCols:
self.register(inputCentroids[col])
# return the set of trackable objects
return self.objects
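In use, the tracker is fed the bounding boxes detected in each frame and returns an ordered dictionary of object IDs mapped to their latest centroid (plus the has_print flag set at registration); an ID persists as long as a nearby centroid keeps appearing within maxDisappeared frames. A minimal driving sketch (synthetic boxes, illustration only):

tracker = CentroidTracker(maxDisappeared=5)
objects = tracker.update([(10, 10, 50, 50)])   # frame 1: one box -> ID 0
objects = tracker.update([(14, 12, 54, 52)])   # frame 2: same box, slightly shifted
for object_id, info in objects.items():
    print(object_id, info['centroid'], info['has_print'])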
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 14:51:33 2017
@author: kyleguan
"""
import numpy as np
import cv2
np.seterr(divide='ignore', invalid='ignore')
class Box:
def __init__(self):
self.x, self.y = float(), float()
self.w, self.h = float(), float()
self.c = float()
self.prob = float()
def overlap(x1, w1, x2, w2):
l1 = x1 - w1 / 2.;
l2 = x2 - w2 / 2.;
left = max(l1, l2)
r1 = x1 + w1 / 2.;
r2 = x2 + w2 / 2.;
right = min(r1, r2)
return right - left;
def box_intersection(a, b):
w = overlap(a.x, a.w, b.x, b.w);
h = overlap(a.y, a.h, b.y, b.h);
if w < 0 or h < 0: return 0;
area = w * h;
return area;
def box_union(a, b):
i = box_intersection(a, b);
u = a.w * a.h + b.w * b.h - i;
return u;
def box_iou(a, b):
return box_intersection(a, b) / box_union(a, b);
def box_iou2(a, b):
'''
Helper function to calculate the ratio between intersection and the union of
two boxes a and b
a[0], a[1], a[2], a[3] <-> left, up, right, bottom
'''
w_intsec = np.maximum(0, (np.minimum(a[2], b[2]) - np.maximum(a[0], b[0])))
h_intsec = np.maximum(0, (np.minimum(a[3], b[3]) - np.maximum(a[1], b[1])))
s_intsec = w_intsec * h_intsec
s_a = (a[2] - a[0]) * (a[3] - a[1])
s_b = (b[2] - b[0]) * (b[3] - b[1])
return float(s_intsec) / (s_a + s_b - s_intsec)
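As a quick worked check of box_iou2 under the [left, up, right, bottom] convention in its docstring: two 2x2 boxes shifted horizontally by one unit intersect in a 1x2 strip, so IoU = 2 / (4 + 4 - 2) = 1/3.

a = [0, 0, 2, 2]   # left, up, right, bottom
b = [1, 0, 3, 2]
print(box_iou2(a, b))  # ~0.333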
def convert_to_pixel(box_yolo, img, crop_range):
'''
Helper function to convert (scaled) coordinates of a bounding box
to pixel coordinates.
Example (0.89361443264143803, 0.4880486045564924, 0.23544462956491041,
0.36866588651069609)
crop_range: specifies the part of image to be cropped
'''
box = box_yolo
imgcv = img
[xmin, xmax] = crop_range[0]
[ymin, ymax] = crop_range[1]
h, w, _ = imgcv.shape
# Calculate left, top, width, and height of the bounding box
left = int((box.x - box.w / 2.) * (xmax - xmin) + xmin)
top = int((box.y - box.h / 2.) * (ymax - ymin) + ymin)
width = int(box.w * (xmax - xmin))
height = int(box.h * (ymax - ymin))
# Deal with corner cases
if left < 0: left = 0
if top < 0: top = 0
# Return the coordinates (in the unit of the pixels)
box_pixel = np.array([left, top, width, height])
return box_pixel
def convert_to_cv2bbox(bbox, img_dim=(1280, 720)):
'''
Helper function for converting bbox to bbox_cv2
bbox = [left, top, width, height]
bbox_cv2 = [left, top, right, bottom]
img_dim: dimension of the image, img_dim[0]<-> x
img_dim[1]<-> y
'''
left = np.maximum(0, bbox[0])
top = np.maximum(0, bbox[1])
right = np.minimum(img_dim[0], bbox[0] + bbox[2])
bottom = np.minimum(img_dim[1], bbox[1] + bbox[3])
return (left, top, right, bottom)
def draw_box_label(id, img, bbox_cv2, box_color=(0, 255, 255), show_label=True):
'''
Helper function for drawing the bounding boxes and the labels
bbox_cv2 = [left, top, right, bottom]
'''
# box_color= (0, 255, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
font_size = 0.7
font_color = (0, 0, 0)
left, top, right, bottom = bbox_cv2[1], bbox_cv2[0], bbox_cv2[3], bbox_cv2[2]
# Draw the bounding box
cv2.rectangle(img, (left, top), (right, bottom), box_color, 4)
# centroid = [int(left+((right - left)/2)), int(top+((bottom - top)/2))]
if show_label:
# Draw a filled box on top of the bounding box (as the background for the labels)
cv2.rectangle(img, (left - 2, top - 45), (right + 2, top), box_color, -1, 1)
# Output the labels that show the x and y coordinates of the bounding box center.
text_x = 'id=' + str(id)
cv2.putText(img, text_x, (left, top - 25), font, font_size, font_color, 1, cv2.LINE_AA)
text_y = 'y=' + str((top + bottom) / 2)
# cv2.putText(img, text_y, (left, top - 5), font, font_size, font_color, 1, cv2.LINE_AA)
return img
from edge_engine.common.logsetup import logger
import cv2
def draw_circles_on_frame(frame, point, radius=3, color=(255, 255, 255), thickness=1):
"""
draw circle on the objects
:param radius: radius of the circle
:param frame: frame to draw on
:param point: co-ordinate to draw on
:param color: color of the circle
:param thickness: thickness of the circle
:return: frame
"""
logger.debug("Drawing circle centroid on the frame")
return cv2.circle(frame, tuple(point), radius, color, thickness)
def resize_to_64_64(frame):
"""
resize the frame to 64 x 64
:param frame: frame
:return: frame
"""
logger.debug("Resizing the frame to 64 x 64")
return cv2.resize(frame, (64, 64))
\ No newline at end of file
black_white_ratio_dict = {'ambuja_plus': 1.2, 'acc_gold': 1.2, 'acc_suraksha_power_plus': 1.2, 'ambuja_buildcem': 1.2, 'acc_suraksha_power': 1.2, 'acc_nfr': 1.2, 'acc_concrete_plus': 1.2}
black_white_ratio = black_white_ratio_dict["ambuja_plus"]
print(black_white_ratio)
\ No newline at end of file