Commit e5b65552 authored by sikhin.vc

initial commit

parent d8a42d85
# yolov8_keypoint_detection
**Annotation workflow:**
1. Annotate images using COCO Annotator (instructions are inside the coco-annotator directory).
   How to annotate: https://www.youtube.com/watch?v=OMJRcjnMMok&pp=ygUlaG93IHRvIGFubm90YXRlIHVzaW5nIGNvY28gYW5ub3RhdG9yIA%3D%3D
2. Convert the JSON annotations to txt format using the convert_coco_to_txt.py script.
3. Augment and split the dataset with the script provided.
4. Modify custom_kpts.yaml to match your dataset.
5. Start training! (see the sketch below)
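Each converted `.txt` label stores, per object, the class id, a normalized bounding box, and normalized keypoint coordinates, which is the layout YOLOv8 pose/keypoint models expect. A minimal training sketch, assuming the `ultralytics` package and a pretrained `yolov8n-pose.pt` checkpoint (model size, epochs, and image size below are placeholders, not project defaults):

```python
# Minimal YOLOv8 keypoint (pose) training sketch -- adjust paths and hyperparameters.
from ultralytics import YOLO

model = YOLO("yolov8n-pose.pt")       # pretrained pose/keypoint checkpoint
model.train(
    data="custom_kpts.yaml",          # dataset config edited in step 4 above
    epochs=100,
    imgsz=640,
)
metrics = model.val()                 # evaluate on the validation split
```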
import json
import os.path
import shutil
from pathlib import Path
import random
import uuid
# import albumentations as a
import cv2
import numpy as np
annotation_directory = Path("/home/shikhin/Pictures/wipro_gate_video_frames/images")
labels_directory = Path("/home/shikhin/Pictures/wipro_gate_video_frames/labels")
annotation_file_path = Path("D:/pycharmprojects/segmentation/dataset/rect_gauge_coco.json")
post_process_directory = Path("/home/shikhin/Pictures/wipro_gate_video_frames/images")
labels_post_process_directory = Path("/home/shikhin/Pictures/wipro_gate_video_frames/labels")
class DataAugmentation:
"""
Handles various augmentations for the dataset.
"""
def __init__(self):
pass
# self.pool.apply_async(self.run_augmentations,
# (annotation_directory, post_process_directory, filename, each_file))
def brightness(self, img, low, high):
value = random.uniform(low, high)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = np.array(hsv, dtype=np.float64)
hsv[:, :, 1] = hsv[:, :, 1] * value
hsv[:, :, 1][hsv[:, :, 1] > 255] = 255
hsv[:, :, 2] = hsv[:, :, 2] * value
hsv[:, :, 2][hsv[:, :, 2] > 255] = 255
hsv = np.array(hsv, dtype=np.uint8)
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return img
def brightness_augmentation(self, image, low=0.5, high=1.5):
brightness_factor = np.random.uniform(low, high)
augmented_image = image * brightness_factor
augmented_image = np.clip(augmented_image, 0, 255).astype(np.uint8)
return augmented_image
def contrast_augmentation(self, image, low=0.5, high=1.5):
contrast_factor = np.random.uniform(low, high)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_image = np.clip(contrast_factor * gray_image, 0, 255).astype(np.uint8)
augmented_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
return augmented_image
def gaussian_noise_augmentation(self, image, mean=0, std=25):
noise = np.random.normal(mean, std, image.shape).astype(np.uint8)
augmented_image = cv2.add(image, noise)
augmented_image = np.clip(augmented_image, 0, 255).astype(np.uint8)
return augmented_image
def blur_augmentation(self, image, kernel_size=5):
augmented_image = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
return augmented_image
def process(self, annotation_directory, post_process_directory, labels_directory, labels_post_process_directory):
assert os.path.exists(annotation_directory)
if not os.path.exists(post_process_directory):
os.mkdir(post_process_directory)
image_names = os.listdir(annotation_directory)
image_name_li = []
for each_image in image_names:
filename, file_extension = os.path.splitext(each_image)
image_name_li.append(filename)
label_names = os.listdir(labels_directory)
label_name_li = []
for each_label in label_names:
filename, file_extension = os.path.splitext(each_label)
label_name_li.append(filename)
for each_image in image_name_li:
if each_image in label_name_li:
# for each_file, label_file in zip(os.listdir(annotation_directory), os.listdir(labels_directory)):
# filename, file_extension = os.path.splitext(each_file)
# label_filename, label_file_extension = os.path.splitext(label_file)
# print(filename)
# print(label_filename)
# print(filename, file_extension)
# print(label_filename, label_file_extension)
# if filename == label_filename:
# if file_extension in ['.jpg', '.jpeg', '.png']:
image_name_with_ext = each_image + ".jpg"
label_name_with_ext = each_image + ".txt"
image = cv2.imread(os.path.join(annotation_directory, image_name_with_ext))
multi_images = (
self.blur_augmentation(image), self.gaussian_noise_augmentation(image),
self.brightness(image, 0.5, 3),
self.contrast_augmentation(image), self.brightness_augmentation(image))
_file_name = 0
for each_element in multi_images:
image = each_element
uuid_name = uuid.uuid1()
print(uuid_name)
print(os.path.join(post_process_directory,
f"{image_name_with_ext[:-4]}" + "_" + f"{_file_name} {uuid_name}" + f"{file_extension}"))
cv2.imwrite(
os.path.join(post_process_directory,
f"{image_name_with_ext[:-4]}" + "_" + f"{_file_name} {uuid_name}" + ".jpg"),
image)
shutil.copy(os.path.join(labels_directory, label_name_with_ext),
os.path.join(labels_post_process_directory,
f"{each_image}" + "_" + f"{_file_name} {uuid_name}" + ".txt"))
_file_name = _file_name + 1
def combine_dataset(self, annotation_directory, post_process_directory, labels_directory,
labels_post_process_directory):
for each_file, label_file in zip(os.listdir(annotation_directory), os.listdir(labels_directory)):
shutil.copy(os.path.join(annotation_directory, each_file), post_process_directory)
shutil.copy(os.path.join(labels_directory, label_file), labels_post_process_directory)
obj = DataAugmentation()
obj.process(annotation_directory, post_process_directory, labels_directory, labels_post_process_directory)
datasets/*
models/*
.pytest_cache/*
.cache/*
.idea/*
datasets/*
!datasets/.gitkeep
.history/*
.vscode/*
__pycache__
dist/*
!dist/.gitkeep
docs/.vuepress/dist/*
models/*.h5
matrix:
include:
- language: python
python:
- 3.6
services:
- docker
cache: pip
env:
- MONGODB_HOST=mongodb://localhost/test
- DATASET_DIRECTORY=datasets/
- TESTING=true
- LOGIN_DISABLED=true
before_script:
- pip install -r backend/requirements.txt
- pip install pycocotools
- docker run -d -p 27017:27017 mongo
- docker ps -a
script: pytest
# - language: node_js
# node_js:
# - 8
# - node
# cache: npm
# before_install:
# - npm install -g npm@latest
# - cd client
# install:
# - npm i
# script: npm test
# Contributing to COCO Annotator
Love [COCO Annotator](/jsbroks/coco-annotator) and want to get involved?
Thanks! We're actively looking for folks interested in helping out and there
are plenty of ways you can help!
Please take a moment to review this document in order to make the contribution
process easy and effective for everyone involved.
Following these guidelines helps to communicate that you respect the time of
the developers managing and developing this open source project. In return,
they should reciprocate that respect in addressing your issue or assessing
patches and features.
## Using the issue tracker
The [issue tracker](/jsbroks/coco-annotator/issues) is
the preferred channel for [bug reports](#bugs), [feature requests](#features)
and [submitting pull requests](#pull-requests), but please respect the following
restrictions:
* Please **do not** use the issue tracker for personal support requests (use
[Stack Overflow](https://stackoverflow.com/questions/tagged/coco-annotator)).
* Please **do not** derail or troll issues. Keep the discussion on topic and
respect the opinions of others.
<a name="bugs"></a>
## Bug reports
A bug is a _demonstrable problem_ that is caused by the code in the repository.
Good bug reports are extremely helpful - thank you!
Guidelines for bug reports:
1. **Use the GitHub issue search** &mdash; check if the issue has already been
reported.
2. **Check if the issue has been fixed** &mdash; try to reproduce it using the
latest `master` or development branch in the repository.
3. **Isolate the problem** &mdash; ideally create a [reduced test
case](https://css-tricks.com/reduced-test-cases/) and a live example.
A good bug report shouldn't leave others needing to chase you up for more
information. Please try to be as detailed as possible in your report. What is
your environment? What steps will reproduce the issue? What browser(s) and OS
experience the problem? What would you expect to be the outcome? All these
details will help people to fix any potential bugs.
Example:
> Short and descriptive example bug report title
>
> A summary of the issue and the browser/OS environment in which it occurs. If
> suitable, include the steps required to reproduce the bug.
>
> 1. This is the first step
> 2. This is the second step
> 3. Further steps, etc.
>
> `<url>` - a link to the reduced test case
>
> Any other information you want to share that is relevant to the issue being
> reported. This might include the lines of code that you have identified as
> causing the bug, and potential solutions (and your opinions on their
> merits).
<a name="features"></a>
## Feature requests
Feature requests are welcome. But take a moment to find out whether your idea
fits with the scope and aims of the project. It's up to *you* to make a strong
case to convince the project's developers of the merits of this feature. Please
provide as much detail and context as possible.
<a name="pull-requests"></a>
## Pull requests
Good pull requests - patches, improvements, new features - are a fantastic
help. They should remain focused in scope and avoid containing unrelated
commits.
**Please ask first** before embarking on any significant pull request (e.g.
implementing features, refactoring code, porting to a different language),
otherwise you risk spending a lot of time working on something that the
project's developers might not want to merge into the project.
Please adhere to the coding conventions used throughout a project (indentation,
accurate comments, etc.) and any other requirements (such as test coverage).
Adhering to the following process is the best way to get your work
included in the project:
1. [Fork](https://help.github.com/articles/fork-a-repo/) the project, clone your
fork, and configure the remotes:
```bash
# Clone your fork of the repo into the current directory
git clone https://github.com/<your-username>/coco-annotator.git
# Navigate to the newly cloned directory
cd coco-annotator
# Assign the original repo to a remote called "upstream"
git remote add upstream https://github.com/jsbroks/coco-annotator.git
```
2. If you cloned a while ago, get the latest changes from upstream:
```bash
git checkout master
git pull upstream master
```
3. Create a new topic branch (off the main project development branch) to
contain your feature, change, or fix:
```bash
git checkout -b <topic-branch-name>
```
4. Commit your changes in logical chunks. Please adhere to these [git commit
message guidelines](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)
or your code is unlikely to be merged into the main project. Use Git's
[interactive rebase](https://help.github.com/articles/about-git-rebase/)
feature to tidy up your commits before making them public.
5. Locally merge (or rebase) the upstream development branch into your topic branch:
```bash
git pull [--rebase] upstream master
```
6. Push your topic branch up to your fork:
```bash
git push origin <topic-branch-name>
```
7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/)
with a clear title and description.
**IMPORTANT**: By submitting a patch, you agree to allow the project
owners to license your work under the terms of the [MIT License](LICENSE.txt).
FROM node:10 as build-stage
WORKDIR /workspace/
COPY ./client /workspace/client
RUN npm install -g @vue/cli@3.3.0
RUN npm install -g @vue/cli-service@3.3.0
COPY ./client/package* /workspace/
RUN npm install
ENV NODE_PATH=/workspace/node_modules
WORKDIR /workspace/client
RUN npm run build
FROM jsbroks/coco-annotator:python-env
WORKDIR /workspace/
COPY ./backend/ /workspace/
COPY ./.git /workspace/.git
RUN python set_path.py
COPY --from=build-stage /workspace/client/dist /workspace/dist
ENV FLASK_ENV=production
ENV DEBUG=false
EXPOSE 5000
CMD gunicorn -c webserver/gunicorn_config.py webserver:app --no-sendfile --timeout 180
MIT License
Copyright (c) 2018 Justin Brooks
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
<p align="center"><img src="https://i.imgur.com/AA7IdbQ.png"></p>
<p align="center">
<a href="#features">Features</a>
<a href="https://github.com/jsbroks/coco-annotator/wiki">Wiki</a>
<a href="https://github.com/jsbroks/coco-annotator/wiki/Getting-Started">Getting Started</a>
<a href="https://github.com/jsbroks/coco-annotator/issues">Issues</a>
<a href="#license">License</a>
</p>
---
<p align="center">
<a href="/jsbroks/coco-annotator/stargazers">
<img src="https://img.shields.io/github/stars/jsbroks/coco-annotator.svg">
</a>
<a href="/jsbroks/coco-annotator/issues">
<img src="https://img.shields.io/github/issues/jsbroks/coco-annotator.svg">
</a>
<a href="https://tldrlegal.com/license/mit-license">
<img src="https://img.shields.io/github/license/mashape/apistatus.svg">
</a>
<a href="https://lgtm.com/projects/g/jsbroks/coco-annotator/context:javascript">
<img src="https://img.shields.io/lgtm/grade/javascript/g/jsbroks/coco-annotator.svg?label=code%20quality">
</a>
<a href="https://annotator.justinbrooks.ca/">
<img src="https://img.shields.io/badge/demo-online-green.svg">
</a>
<a href="https://travis-ci.org/jsbroks/coco-annotator">
<img src="https://travis-ci.org/jsbroks/coco-annotator.svg?branch=master">
</a>
<a href="https://hub.docker.com/r/jsbroks/coco-annotator">
<img src="https://img.shields.io/docker/pulls/jsbroks/coco-annotator.svg">
</a>
</p>
COCO Annotator is a web-based image annotation tool designed for versatility and efficiency in labeling images to create training data for image localization and object detection. It provides many distinct features, including the ability to label an image segment (or part of a segment), track object instances, label objects with disconnected visible parts, and efficiently store and export annotations in the well-known [COCO format](http://cocodataset.org/#format-data). The annotation process is delivered through an intuitive and customizable interface and provides many tools for creating accurate datasets.
<br />
<p align="center">Join our growing <a href="https://discord.gg/4zP5Qkj">discord community</a> of ML practitioner</p>
<p align="center">
<a href="https://discord.gg/4zP5Qkj">
<img src="https://discord.com/assets/e4923594e694a21542a489471ecffa50.svg" width="120">
</a>
</p>
<br />
<p align="center"><a href="http://www.youtube.com/watch?feature=player_embedded&v=OMJRcjnMMok" target="_blank"><img src="https://img.youtube.com/vi/OMJRcjnMMok/maxresdefault.jpg"
alt="Image annotations using COCO Annotator" width="600" /></a></p>
<p align="center"><i>Checkout the video for a basic guide on installing and using COCO Annotator.</i></p>
<br />
<p align="center"><img width="600" src="https://i.imgur.com/m4RmjCp.gif"></p>
<p align="center"><i>Note: This video is from v0.1.0 and many new features have been added.</i></p>
<br>
<p align="center">If you enjoy my work please consider supporting me</p>
<p align="center">
<a href="https://www.patreon.com/jsbroks">
<img src="https://c5.patreon.com/external/logo/become_a_patron_button@2x.png" width="120">
</a>
</p>
<br>
# Features
Several annotation tools are currently available, most of them as desktop installations. Once installed, users can manually define regions in an image and create a textual description. Generally, objects can be marked by a bounding box, either directly, through a masking tool, or by marking points to define the containing area. _COCO Annotator_ allows users to annotate images using free-form curves or polygons and provides many additional features where other annotation tools fall short. (A sketch of the exported annotation format follows the feature list below.)
- Directly export to COCO format
- Segmentation of objects
- Ability to add key points
- Useful API endpoints to analyze data
- Import datasets already annotated in COCO format
- Annotate disconnected objects as a single instance
- Label image segments with any number of labels simultaneously
- Allow custom metadata for each instance or object
- Advanced selection tools such as [DEXTR](https://github.com/jsbroks/dextr-keras), [MaskRCNN](https://github.com/matterport/Mask_RCNN) and Magic Wand
- Annotate images with semi-trained models
- Generate datasets using Google Images
- User authentication system
For examples and more information check out the [wiki](https://github.com/jsbroks/coco-annotator/wiki).
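As a rough illustration of the COCO keypoint export mentioned above, a single annotation entry in the exported JSON looks roughly like this (a hand-written sketch with made-up values; see the official COCO format documentation for the authoritative definition):

```python
# Sketch of one COCO-style keypoint annotation (illustrative values only).
# "keypoints" is a flat [x1, y1, v1, x2, y2, v2, ...] list, where v is a
# visibility flag: 0 = not labeled, 1 = labeled but not visible, 2 = visible.
annotation = {
    "id": 1,
    "image_id": 42,
    "category_id": 1,
    "bbox": [120.0, 80.0, 60.0, 40.0],   # [x, y, width, height]
    "area": 2400.0,
    "iscrowd": 0,
    "num_keypoints": 2,
    "keypoints": [130.0, 90.0, 2, 170.0, 110.0, 2],
    "segmentation": [[120.0, 80.0, 180.0, 80.0, 180.0, 120.0, 120.0, 120.0]],
}
```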
# Demo
| Login Information |
| ---------------------- |
| **Username:** admin |
| **Password:** password |
https://annotator.justinbrooks.ca/
# Backers
If you enjoy the development of coco-annotator or are looking for an enterprise annotation tool, consider checking out DataTorch.
<p align="center">
<a href="https://datatorch.io">
<img src="https://i.imgur.com/sOQ1s5F.png" width="250" />
</a>
<p align="center">
https://datatorch.io · <a href="mailto:support@datatorch.io">support@datatorch.io</a> · <i>Next generation of coco-annotator</i>
</p>
</p>
# Built With
Thanks to all these wonderful libraries/frameworks:
### Backend
- [Flask](http://flask.pocoo.org/) - Python web microframework
- [MongoDB](https://www.mongodb.com/) - Cross-platform document-oriented database
- [MongoEngine](http://mongoengine.org/) - Python object data mapper for MongoDB
### Frontend
- [Vue](https://vuejs.org/) - JavaScript framework for building user interfaces
- [Axios](https://github.com/axios/axios) - Promise based HTTP client
- [PaperJS](http://paperjs.org/) - HTML canvas vector graphics library
- [Bootstrap](https://getbootstrap.com/) - Frontend component library
# License
[MIT](https://tldrlegal.com/license/mit-license)
# Citation
```
@MISC{cocoannotator,
author = {Justin Brooks},
title = {{COCO Annotator}},
howpublished = "\url{https://github.com/jsbroks/coco-annotator/}",
year = {2019},
}
```
# Backend environment docker image
FROM tensorflow/tensorflow:1.14.0-gpu-py3
RUN apt-get update
RUN apt-get install -y git
WORKDIR /workspace/
# Copy backend
COPY ./backend/requirements.txt /workspace/
# Install python package dependencies
RUN pip install -r requirements.txt && \
pip install gunicorn[eventlet]==19.9.0 && \
pip install pycocotools
# Install maskrcnn
RUN git clone --single-branch --depth 1 https://github.com/matterport/Mask_RCNN.git /tmp/maskrcnn
#RUN cd /tmp/maskrcnn && pip install -r requirements.txt
RUN cd /tmp/maskrcnn && python3 setup.py install
# Install DEXTR
RUN git clone --single-branch --depth 1 https://github.com/jsbroks/dextr-keras.git /tmp/dextr && \
cd /tmp/dextr && \
python setup.py install
RUN apt-get -y -o Dpkg::Options::="--force-confmiss" install --reinstall netbase
# COCO Annotator Backend
## Web Server
## Workers
## Database
## Config
from .config import *
import os
import subprocess
def get_tag():
result = subprocess.run(["git", "describe", "--abbrev=0", "--tags"], stdout=subprocess.PIPE)
return str(result.stdout.decode("utf-8")).strip()
def _get_bool(key, default_value):
if key in os.environ:
value = os.environ[key]
if value == 'True' or value == 'true' or value == '1':
return True
return False
return default_value
class Config:
NAME = os.getenv("NAME", "COCO Annotator")
VERSION = get_tag()
### File Watcher
FILE_WATCHER = os.getenv("FILE_WATCHER", False)
IGNORE_DIRECTORIES = ["_thumbnail", "_settings"]
# Flask/Gunicorn
#
# LOG_LEVEL - The granularity of log output
#
# A string of "debug", "info", "warning", "error", "critical"
#
# WORKER_CONNECTIONS - limits the maximum number of simultaneous
# clients that a single process can handle.
#
# A positive integer generally set to around 1000.
#
# WORKER_TIMEOUT - If a worker does not notify the master process
# in this number of seconds it is killed and a new worker is
# spawned to replace it.
#
SWAGGER_UI_JSONEDITOR = True
DEBUG = os.getenv("DEBUG", 'false').lower() == 'true'
PRELOAD = False
MAX_CONTENT_LENGTH = os.getenv("MAX_CONTENT_LENGTH", 1 * 1024 * 1024 * 1024) # 1GB
MONGODB_HOST = os.getenv("MONGODB_HOST", "mongodb://database/flask")
SECRET_KEY = os.getenv("SECRET_KEY", "<--- CHANGE THIS KEY --->")
LOG_LEVEL = 'debug'
WORKER_CONNECTIONS = 1000
TESTING = os.getenv("TESTING", False)
### Workers
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "amqp://user:password@messageq:5672//")
CELERY_RESULT_BACKEND = os.getenv("CELERY_RESULT_BACKEND", "mongodb://database/flask")
### Dataset Options
DATASET_DIRECTORY = os.getenv("DATASET_DIRECTORY", "/datasets/")
INITIALIZE_FROM_FILE = os.getenv("INITIALIZE_FROM_FILE")
### User Options
LOGIN_DISABLED = _get_bool("LOGIN_DISABLED", False)
ALLOW_REGISTRATION = _get_bool('ALLOW_REGISTRATION', True)
### Models
MASK_RCNN_FILE = os.getenv("MASK_RCNN_FILE", "")
MASK_RCNN_CLASSES = os.getenv("MASK_RCNN_CLASSES", "BG")
DEXTR_FILE = os.getenv("DEXTR_FILE", "/models/dextr_pascal-sbd.h5")
__all__ = ["Config"]
from mongoengine import connect
from config import Config
from .annotations import *
from .categories import *
from .datasets import *
from .lisence import *
from .exports import *
from .images import *
from .events import *
from .users import *
from .tasks import *
import json
def connect_mongo(name, host=None):
if host is None:
host = Config.MONGODB_HOST
connect(name, host=host)
# https://github.com/MongoEngine/mongoengine/issues/1171
# Use this method until a solution is found
def upsert(model, query=None, update=None):
if not update:
update = query
if not query:
return None
found = model.objects(**query)
if found.first():
return found.modify(new=True, **update)
new_model = model(**update)
new_model.save()
return new_model
def fix_ids(q):
json_obj = json.loads(q.to_json().replace('\"_id\"', '\"id\"'))
return json_obj
def create_from_json(json_file):
with open(json_file) as file:
data_json = json.load(file)
for category in data_json.get('categories', []):
name = category.get('name')
if name is not None:
upsert(CategoryModel, query={"name": name}, update=category)
for dataset_json in data_json.get('datasets', []):
name = dataset_json.get('name')
if name:
# map category names to ids; create as needed
category_ids = []
for category in dataset_json.get('categories', []):
category_obj = {"name": category}
category_model = upsert(CategoryModel, query=category_obj)
category_ids.append(category_model.id)
dataset_json['categories'] = category_ids
upsert(DatasetModel, query={ "name": name}, update=dataset_json)
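`create_from_json` is what `Config.INITIALIZE_FROM_FILE` feeds at startup. A sketch of the kind of file it appears to expect, inferred from the loops above; the names and the `keypoint_labels` field are purely illustrative:

```python
# Illustrative INITIALIZE_FROM_FILE payload for create_from_json (assumed example).
# "categories" holds category documents keyed by "name"; "datasets" holds dataset
# documents whose "categories" list contains category *names*, mapped to ids above.
import json

example = {
    "categories": [
        {"name": "gauge", "keypoint_labels": ["needle_tip", "needle_base"]}
    ],
    "datasets": [
        {"name": "gauges", "categories": ["gauge"]}
    ]
}

with open("/datasets/initialize.json", "w") as f:
    json.dump(example, f, indent=2)
# create_from_json("/datasets/initialize.json")
```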
import imantics as im
import json
import cv2
import numpy as np
from mongoengine import *
from .datasets import DatasetModel
from .categories import CategoryModel
from .events import Event
from flask_login import current_user
class AnnotationModel(DynamicDocument):
COCO_PROPERTIES = ["id", "image_id", "category_id", "segmentation",
"iscrowd", "color", "area", "bbox", "metadata",
"keypoints", "isbbox"]
id = SequenceField(primary_key=True)
image_id = IntField(required=True)
category_id = IntField(required=True)
dataset_id = IntField()
segmentation = ListField(default=[])
area = IntField(default=0)
bbox = ListField(default=[0, 0, 0, 0])
iscrowd = BooleanField(default=False)
isbbox = BooleanField(default=False)
creator = StringField(required=True)
width = IntField()
height = IntField()
color = StringField()
keypoints = ListField(default=[])
metadata = DictField(default={})
paper_object = ListField(default=[])
deleted = BooleanField(default=False)
deleted_date = DateTimeField()
milliseconds = IntField(default=0)
events = EmbeddedDocumentListField(Event)
def __init__(self, image_id=None, **data):
from .images import ImageModel
if image_id is not None:
image = ImageModel.objects(id=image_id).first()
if image is not None:
data['image_id'] = image_id
data['width'] = image.width
data['height'] = image.height
data['dataset_id'] = image.dataset_id
super(AnnotationModel, self).__init__(**data)
def save(self, copy=False, *args, **kwargs):
if self.dataset_id and not copy:
dataset = DatasetModel.objects(id=self.dataset_id).first()
if dataset is not None:
self.metadata = dataset.default_annotation_metadata.copy()
if self.color is None:
self.color = im.Color.random().hex
if current_user:
self.creator = current_user.username
else:
self.creator = 'system'
return super(AnnotationModel, self).save(*args, **kwargs)
def is_empty(self):
return len(self.segmentation) == 0 or self.area == 0
def mask(self):
""" Returns binary mask of annotation """
mask = np.zeros((self.height, self.width))
pts = [
np.array(anno).reshape(-1, 2).round().astype(int)
for anno in self.segmentation
]
mask = cv2.fillPoly(mask, pts, 1)
return mask
def clone(self):
""" Creates a clone """
create = json.loads(self.to_json())
del create['_id']
return AnnotationModel(**create)
def __call__(self):
category = CategoryModel.objects(id=self.category_id).first()
if category:
category = category()
data = {
'image': None,
'category': category,
'color': self.color,
'polygons': self.segmentation,
'width': self.width,
'height': self.height,
'metadata': self.metadata
}
return im.Annotation(**data)
def add_event(self, e):
self.update(push__events=e)
__all__ = ["AnnotationModel"]
from flask_login import current_user
from mongoengine import *
import imantics as im
class CategoryModel(DynamicDocument):
COCO_PROPERTIES = ["id", "name", "supercategory", "color", "metadata",\
"keypoint_edges", "keypoint_labels", "keypoint_colors"]
id = SequenceField(primary_key=True)
name = StringField(required=True, unique_with=['creator'])
supercategory = StringField(default='')
color = StringField(default=None)
metadata = DictField(default={})
creator = StringField(default='unknown')
deleted = BooleanField(default=False)
deleted_date = DateTimeField()
keypoint_edges = ListField(default=[])
keypoint_labels = ListField(default=[])
keypoint_colors = ListField(default=[])
@classmethod
def bulk_create(cls, categories):
if not categories:
return []
category_ids = []
for category in categories:
category_model = CategoryModel.objects(name=category).first()
if category_model is None:
new_category = CategoryModel(name=category)
new_category.save()
category_ids.append(new_category.id)
else:
category_ids.append(category_model.id)
return category_ids
def save(self, *args, **kwargs):
if not self.color:
self.color = im.Color.random().hex
if current_user:
self.creator = current_user.username
else:
self.creator = 'system'
return super(CategoryModel, self).save(*args, **kwargs)
def __call__(self):
""" Generates imantics category object """
data = {
'name': self.name,
'color': self.color,
'parent': self.supercategory,
'metadata': self.metadata,
'id': self.id
}
return im.Category(**data)
def is_owner(self, user):
if user.is_admin:
return True
return user.username.lower() == self.creator.lower()
def can_edit(self, user):
return self.is_owner(user)
def can_delete(self, user):
return self.is_owner(user)
__all__ = ["CategoryModel"]
from flask_login import current_user
from mongoengine import *
from config import Config
from .tasks import TaskModel
import os
class DatasetModel(DynamicDocument):
id = SequenceField(primary_key=True)
name = StringField(required=True, unique=True)
directory = StringField()
thumbnails = StringField()
categories = ListField(default=[])
owner = StringField(required=True)
users = ListField(default=[])
annotate_url = StringField(default="")
default_annotation_metadata = DictField(default={})
deleted = BooleanField(default=False)
deleted_date = DateTimeField()
def save(self, *args, **kwargs):
directory = os.path.join(Config.DATASET_DIRECTORY, self.name + '/')
os.makedirs(directory, mode=0o777, exist_ok=True)
self.directory = directory
self.owner = current_user.username if current_user else 'system'
return super(DatasetModel, self).save(*args, **kwargs)
def get_users(self):
from .users import UserModel
members = self.users
members.append(self.owner)
return UserModel.objects(username__in=members)\
.exclude('password', 'id', 'preferences')
def import_coco(self, coco_json):
from workers.tasks import import_annotations
task = TaskModel(
name="Import COCO format into {}".format(self.name),
dataset_id=self.id,
group="Annotation Import"
)
task.save()
cel_task = import_annotations.delay(task.id, self.id, coco_json)
return {
"celery_id": cel_task.id,
"id": task.id,
"name": task.name
}
def export_coco(self, categories=None, style="COCO", with_empty_images=False):
from workers.tasks import export_annotations
if categories is None or len(categories) == 0:
categories = self.categories
task = TaskModel(
name=f"Exporting {self.name} into {style} format",
dataset_id=self.id,
group="Annotation Export"
)
task.save()
cel_task = export_annotations.delay(task.id, self.id, categories, with_empty_images)
return {
"celery_id": cel_task.id,
"id": task.id,
"name": task.name
}
def scan(self):
from workers.tasks import scan_dataset
task = TaskModel(
name=f"Scanning {self.name} for new images",
dataset_id=self.id,
group="Directory Image Scan"
)
task.save()
cel_task = scan_dataset.delay(task.id, self.id)
return {
"celery_id": cel_task.id,
"id": task.id,
"name": task.name
}
def is_owner(self, user):
if user.is_admin:
return True
return user.username.lower() == self.owner.lower()
def can_download(self, user):
return self.is_owner(user)
def can_delete(self, user):
return self.is_owner(user)
def can_share(self, user):
return self.is_owner(user)
def can_generate(self, user):
return self.is_owner(user)
def can_edit(self, user):
return user.username in self.users or self.is_owner(user)
def permissions(self, user):
return {
'owner': self.is_owner(user),
'edit': self.can_edit(user),
'share': self.can_share(user),
'generate': self.can_generate(user),
'delete': self.can_delete(user),
'download': self.can_download(user)
}
__all__ = ["DatasetModel"]
from mongoengine import *
import datetime
import time
class Event(EmbeddedDocument):
name = StringField()
created_at = DateTimeField()
meta = {'allow_inheritance': True}
def now(self, event):
self.created_at = datetime.datetime.now()
class SessionEvent(Event):
user = StringField(required=True)
milliseconds = IntField(default=0, min_value=0)
tools_used = ListField(default=[])
@classmethod
def create(self, start, user, end=None, tools=[]):
if end is None:
end = time.time()
return SessionEvent(
user=user.username,
milliseconds=int((end-start)*1000)
)
__all__ = ["Event", "SessionEvent"]
from mongoengine import *
import datetime
import time
class ExportModel(DynamicDocument):
id = SequenceField(primary_key=True)
dataset_id = IntField(required=True)
path = StringField(required=True)
tags = ListField(default=[])
categories = ListField(default=[])
created_at = DateTimeField(default=datetime.datetime.utcnow)
def get_file(self):
return
__all__ = ["ExportModel"]
import os
import imantics as im
from PIL import Image, ImageFile
from mongoengine import *
from .events import Event, SessionEvent
from .datasets import DatasetModel
from .annotations import AnnotationModel
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ImageModel(DynamicDocument):
COCO_PROPERTIES = ["id", "width", "height", "file_name", "path", "license",\
"flickr_url", "coco_url", "date_captured", "dataset_id"]
# -- Constants
THUMBNAIL_DIRECTORY = '.thumbnail'
PATTERN = (".gif", ".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff", ".GIF", ".PNG", ".JPG", ".JPEG", ".BMP", ".TIF", ".TIFF")
# Set maximum thumbnail size (h x w) to use on dataset page
MAX_THUMBNAIL_DIM = (1024, 1024)
# -- Private
_dataset = None
# -- Database
id = SequenceField(primary_key=True)
dataset_id = IntField(required=True)
category_ids = ListField(default=[])
# Absolute path to image file
path = StringField(required=True, unique=True)
width = IntField(required=True)
height = IntField(required=True)
file_name = StringField()
# True if the image is annotated
annotated = BooleanField(default=False)
# People currently annotating the image
annotating = ListField(default=[])
num_annotations = IntField(default=0)
thumbnail_url = StringField()
image_url = StringField()
coco_url = StringField()
date_captured = DateTimeField()
metadata = DictField()
license = IntField()
deleted = BooleanField(default=False)
deleted_date = DateTimeField()
milliseconds = IntField(default=0)
events = EmbeddedDocumentListField(Event)
regenerate_thumbnail = BooleanField(default=False)
@classmethod
def create_from_path(cls, path, dataset_id=None):
pil_image = Image.open(path)
image = cls()
image.file_name = os.path.basename(path)
image.path = path
image.width = pil_image.size[0]
image.height = pil_image.size[1]
image.regenerate_thumbnail = True
if dataset_id is not None:
image.dataset_id = dataset_id
else:
# Get dataset name from path
folders = path.split('/')
i = folders.index("datasets")
dataset_name = folders[i+1]
dataset = DatasetModel.objects(name=dataset_name).first()
if dataset is not None:
image.dataset_id = dataset.id
pil_image.close()
return image
def delete(self, *args, **kwargs):
self.thumbnail_delete()
AnnotationModel.objects(image_id=self.id).delete()
return super(ImageModel, self).delete(*args, **kwargs)
def thumbnail(self):
"""
Generates (if required) thumbnail
"""
thumbnail_path = self.thumbnail_path()
if self.regenerate_thumbnail:
pil_image = self.generate_thumbnail()
pil_image = pil_image.convert("RGB")
# Resize image to fit in MAX_THUMBNAIL_DIM envelope as necessary
pil_image.thumbnail((self.MAX_THUMBNAIL_DIM[1], self.MAX_THUMBNAIL_DIM[0]))
# Save as a jpeg to improve loading time
# (note file extension will not match but allows for backwards compatibility)
pil_image.save(thumbnail_path, "JPEG", quality=80, optimize=True, progressive=True)
self.update(regenerate_thumbnail=False)
return pil_image
def open_thumbnail(self):
"""
Return thumbnail
"""
thumbnail_path = self.thumbnail_path()
return Image.open(thumbnail_path)
def thumbnail_path(self):
folders = self.path.split('/')
folders.insert(len(folders)-1, self.THUMBNAIL_DIRECTORY)
path = '/' + os.path.join(*folders)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
return path
def thumbnail_delete(self):
path = self.thumbnail_path()
if os.path.isfile(path):
os.remove(path)
def generate_thumbnail(self):
image = self().draw(color_by_category=True, bbox=False)
return Image.fromarray(image)
def flag_thumbnail(self, flag=True):
"""
Toggles values to regenerate thumbnail on next thumbnail request
"""
if self.regenerate_thumbnail != flag:
self.update(regenerate_thumbnail=flag)
def copy_annotations(self, annotations):
"""
Creates a copy of the annotations for this image
:param annotations: QuerySet of annotation models
:return: number of annotations
"""
annotations = annotations.filter(
width=self.width, height=self.height).exclude('events')
for annotation in annotations:
if annotation.area > 0 or len(annotation.keypoints) > 0:
clone = annotation.clone()
clone.dataset_id = self.dataset_id
clone.image_id = self.id
clone.save(copy=True)
return annotations.count()
@property
def dataset(self):
if self._dataset is None:
self._dataset = DatasetModel.objects(id=self.dataset_id).first()
return self._dataset
def __call__(self):
image = im.Image.from_path(self.path)
for annotation in AnnotationModel.objects(image_id=self.id, deleted=False).all():
if not annotation.is_empty():
image.add(annotation())
return image
def can_delete(self, user):
return user.can_delete(self.dataset)
def can_download(self, user):
return user.can_download(self.dataset)
# TODO: Fix why using the functions throws an error
def permissions(self, user):
return {
'delete': True,
'download': True
}
def add_event(self, e):
u = {
'push__events': e,
}
if isinstance(e, SessionEvent):
u['inc__milliseconds'] = e.milliseconds
self.update(**u)
__all__ = ["ImageModel"]
from mongoengine import *
class LicenseModel(DynamicDocument):
id = SequenceField(primary_key=True)
name = StringField()
url = StringField()
__all__ = ["LicenseModel"]
from mongoengine import *
import datetime
class TaskModel(DynamicDocument):
id = SequenceField(primary_key=True)
# Type of task: Importer, Exporter, Scanner, etc.
group = StringField(required=True)
name = StringField(required=True)
desciption = StringField()
status = StringField(default="PENDING")
creator = StringField()
#: Start date of the executor
start_date = DateTimeField()
#: End date of the executor
end_date = DateTimeField()
completed = BooleanField(default=False)
failed = BooleanField(default=False)
has_download = BooleanField(default=False)
# If any of the information is relevant to the task
# it should be added
dataset_id = IntField()
image_id = IntField()
category_id = IntField()
progress = FloatField(default=0, min_value=0, max_value=100)
logs = ListField(default=[])
errors = IntField(default=0)
warnings = IntField(default=0)
priority = IntField()
metadata = DictField(default={})
_update_every = 10
_progress_update = 0
def error(self, string):
self._log(string, level="ERROR")
def warning(self, string):
self._log(string, level="WARNING")
def info(self, string):
self._log(string, level="INFO")
def _log(self, string, level):
level = level.upper()
date = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
message = f"[{date}] [{level}] {string}"
statment = {
'push__logs': message
}
if level == "ERROR":
statment['inc__errors'] = 1
self.errors += 1
if level == "WARNING":
statment['inc__warnings'] = 1
self.warnings += 1
self.update(**statment)
def set_progress(self, percent, socket=None):
self.update(progress=int(percent), completed=(percent >= 100))
# Send socket update every 10%
if self._progress_update < percent or percent >= 100:
if socket is not None:
# logger.debug(f"Emitting {percent} progress update for task {self.id}")
socket.emit('taskProgress', {
'id': self.id,
'progress': percent,
'errors': self.errors,
'warnings': self.warnings
}, broadcast=True)
self._progress_update += self._update_every
def api_json(self):
return {
"id": self.id,
"name": self.name
}
__all__ = ["TaskModel"]
import datetime
from mongoengine import *
from flask_login import UserMixin
from .annotations import AnnotationModel
from .categories import CategoryModel
from .datasets import DatasetModel
from .images import ImageModel
class UserModel(DynamicDocument, UserMixin):
password = StringField(required=True)
username = StringField(max_length=25, required=True, unique=True)
email = StringField(max_length=30)
name = StringField()
online = BooleanField(default=False)
last_seen = DateTimeField()
is_admin = BooleanField(default=False)
preferences = DictField(default={})
permissions = ListField(default=[])
# meta = {'allow_inheritance': True}
@property
def datasets(self):
self._update_last_seen()
if self.is_admin:
return DatasetModel.objects
return DatasetModel.objects(Q(owner=self.username) | Q(users__contains=self.username))
@property
def categories(self):
self._update_last_seen()
if self.is_admin:
return CategoryModel.objects
dataset_ids = self.datasets.distinct('categories')
return CategoryModel.objects(Q(id__in=dataset_ids) | Q(creator=self.username))
@property
def images(self):
self._update_last_seen()
if self.is_admin:
return ImageModel.objects
dataset_ids = self.datasets.distinct('id')
return ImageModel.objects(dataset_id__in=dataset_ids)
@property
def annotations(self):
self._update_last_seen()
if self.is_admin:
return AnnotationModel.objects
image_ids = self.images.distinct('id')
return AnnotationModel.objects(image_id__in=image_ids)
def can_view(self, model):
if model is None:
return False
return model.can_view(self)
def can_download(self, model):
if model is None:
return False
return model.can_download(self)
def can_delete(self, model):
if model is None:
return False
return model.can_delete(self)
def can_edit(self, model):
if model is None:
return False
return model.can_edit(self)
def _update_last_seen(self):
self.update(last_seen=datetime.datetime.utcnow())
__all__ = ["UserModel"]
eventlet==0.24.1
opencv-python==4.0.0.21
flask==1.0.2
flask-cors==3.0.7
flask-login==0.4.1
flask-restplus==0.12.1
flask-mongoengine==0.9.5
numpy
cython
scikit-image
requests
google_images_download==2.5.0
watchdog==0.8.3
pytest==3.9.3
pytest-ordering==0.6
imantics==0.1.9
flask-socketio==3.3.2
celery==4.2.2
Shapely==1.7.0
scipy
Pillow
matplotlib
keras==2.1.1
h5py
imgaug
IPython[all]
jupyter
import sys
paths = [
'/workspace/'
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
FROM python:3.6
WORKDIR /workspace/
# Install python package dependencies
COPY ./backend/ /workspace/
COPY ./.git /workspace/.git
RUN pip install -r requirements.txt &&\
pip install pycocotools
ENV LOGIN_DISABLED=true
CMD pytest
import json
import pytest
from database import CategoryModel
category1_id = 0
category2_id = 0
category3_id = 0
class TestCategory:
@classmethod
def setup_class(cls):
CategoryModel.objects.delete()
@pytest.mark.run(before='test_post_categories')
def test_get_empty(self, client):
response = client.get("/api/category/")
data = json.loads(response.data)
assert isinstance(data, list)
assert len(data) == 0
def test_post_no_data(self, client):
response = client.post("/api/category/")
assert response.status_code == 400
@pytest.mark.run(after="test_get_empty")
def test_post_categories(self, client):
global category1_id, category2_id, category3_id
# Category 1 Test
data = {
"name": "test1"
}
response = client.post("/api/category/", json=data)
r = json.loads(response.data)
assert response.status_code == 200
assert r.get("name") == data.get("name")
assert r.get("color") is not None
assert r.get("id") is not None
category1_id = r.get("id")
# Category 2 Test
data = {
"name": "test2",
"color": "white",
"metadata": {"key1": True, "key2": 1, "key3": "value"}
}
response = client.post("/api/category/", json=data)
r = json.loads(response.data)
assert response.status_code == 200
assert r.get("name") == data.get("name")
assert r.get("color") == data.get("color")
assert r.get("metadata") == data.get("metadata")
assert r.get("id") is not None
category2_id = r.get("id")
# Category 3 Test
data = {
"name": "test3"
}
response = client.post("/api/category/", json=data)
r = json.loads(response.data)
assert response.status_code == 200
assert r.get("name") == data.get("name")
assert r.get("metadata") is not None
assert r.get("id") is not None
category3_id = r.get("id")
def test_post_categories_invalid(self, client):
pass
@pytest.mark.run(after='test_post_categories')
def test_post_already_existing_category(self, client):
pass
class TestCategoryId:
@pytest.mark.run(after='test_post_categories')
def test_get(self, client):
response = client.get("/api/category/{}".format(category2_id))
r = json.loads(response.data)
assert response.status_code == 200
assert r.get("name") == "test2"
assert r.get("color") == "white"
def test_get_invalid_id(self, client):
response = client.get("/api/category/1000")
assert response.status_code == 400
def test_delete_invalid_id(self, client):
response = client.delete("/api/category/1000")
assert response.status_code == 400
@pytest.mark.run(after='test_post_categories')
def test_delete(self, client):
response = client.delete("/api/category/{}".format(category3_id))
assert response.status_code == 200
@pytest.mark.run(after='test_post_categories')
def test_put_equal(self, client):
""" Test response when the name to update is the same as already stored """
data = {
"name": "test1"
}
response = client.put("/api/category/{}".format(category1_id), json=data)
assert response.status_code == 200
def test_put_invalid_id(self, client):
""" Test response when id does not exit """
response = client.put("/api/category/1000")
assert response.status_code == 400
def test_put_not_unique(self, client):
""" Test response when the name already exits """
data = {
"name": "test2"
}
response = client.put("/api/category/{}".format(category1_id), json=data)
assert response.status_code == 400
def test_put_empty(self, client):
""" Test response when category name is empty"""
data = {
"name": ""
}
response = client.put("/api/category/{}".format(category1_id), json=data)
assert response.status_code == 400
@pytest.mark.run(after='test_put_not_unique')
def test_put(self, client):
""" Test response when update is correct"""
data = {
"name": "test1_updated"
}
response = client.put("/api/category/{}".format(category1_id), json=data)
assert response.status_code == 200
@pytest.mark.run(after='test_put')
def test_put_reset(self, client):
""" Reset test after a correct update """
data = {
"name": "test1"
}
response = client.put("/api/category/{}".format(category1_id), json=data)
assert response.status_code == 200
class TestCategoryData:
# TODO write tests for data
def test(self):
pass
import json
class TestImage:
def test_get_empty(self, client):
response = client.get("/api/image/")
data = json.loads(response.data)
assert isinstance(data, dict)
assert data['total'] == 0
def test_post_no_data(self, client):
response = client.post("/api/image/")
assert response.status_code == 400
def test_post_images(self, client):
pass
def test_post_images_invalid(self, client):
pass
class TestImageId:
def test_get_invalid_id(self, client):
response = client.get("/api/image/1000")
assert response.status_code == 400
def test_delete_invalid_id(self, client):
response = client.delete("/api/image/1000")
assert response.status_code == 400
class TestImageCoco:
def test_get_invalid_id(self, client):
response = client.get("/api/image/1000/coco")
assert response.status_code == 400
import json
def test_info(client):
response = client.get('/api/info/')
data = json.loads(response.data)
assert data.get("git") is not None
import json
import pytest
from database import UserModel
@pytest.mark.second
class TestUser:
@classmethod
def setup_class(cls):
UserModel.objects.delete()
def test_create_first_user(self, client):
response = client.post("/api/user/register", json={
"username": "user",
"password": "pass"
})
data = json.loads(response.data)
assert data.get("success")
user = data.get("user")
assert user.get("is_admin")
import pytest
from webserver import app
@pytest.fixture
def client():
test_client = app.test_client()
return test_client
from database import CategoryModel, upsert
category1 = {
"name": "Upsert Category",
"color": "white"
}
class TestCategoryUpsert:
def test_create_category(self):
query = { "name": category1.get("name") }
create_category1 = upsert(CategoryModel, query=query, update=category1)
assert create_category1.name == category1.get("name")
assert create_category1.color == category1.get("color")
found = CategoryModel.objects(**query).first()
assert found.name == category1.get("name")
assert found.color == category1.get("color")
def test_update_category(self):
query = {"name": category1.get("name")}
set = {"name": "Upsert New", "color": "black"}
found = upsert(CategoryModel, query=query, update=set)
assert found.name == set.get("name")
assert found.color == set.get("color")
import json
import pytest
@pytest.mark.first
def test_api(client):
response = client.get('/api/swagger.json')
assert response is not None
data = json.loads(response.data)
endpoints = data.get('paths').keys()
assert len(endpoints) > 0
FROM jsbroks/coco-annotator:python-env
WORKDIR /workspace/
# Install python package dependencies
COPY ./backend/ /workspace/
COPY ./.git /workspace/.git
RUN python set_path.py
ENV FLASK_ENV=development
ENV DEBUG=true
EXPOSE 5000
CMD gunicorn -c webserver/gunicorn_config.py webserver:app --no-sendfile
import eventlet
eventlet.monkey_patch(thread=False)
import sys
import workers
from config import Config
from database import (
connect_mongo,
ImageModel,
create_from_json
)
from flask import Flask
from flask_cors import CORS
from flask_socketio import SocketIO
from werkzeug.contrib.fixers import ProxyFix
from celery import Celery
from .watcher import run_watcher
from .api import blueprint as api
from .util import query_util, thumbnails
from .authentication import login_manager
from .sockets import socketio
import threading
import requests
import logging
import time
import os
connect_mongo('webserver')
def create_app():
if Config.FILE_WATCHER:
run_watcher()
flask = Flask(__name__,
static_url_path='',
static_folder='../dist')
flask.config.from_object(Config)
CORS(flask)
flask.wsgi_app = ProxyFix(flask.wsgi_app)
flask.register_blueprint(api)
login_manager.init_app(flask)
socketio.init_app(flask, message_queue=Config.CELERY_BROKER_URL)
# Remove all people who were annotating when
# the server shut down
ImageModel.objects.update(annotating=[])
thumbnails.generate_thumbnails()
return flask
app = create_app()
logger = logging.getLogger('gunicorn.error')
app.logger.handlers = logger.handlers
app.logger.setLevel(logger.level)
if Config.INITIALIZE_FROM_FILE:
create_from_json(Config.INITIALIZE_FROM_FILE)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
if app.debug:
return requests.get('http://frontend:8080/{}'.format(path)).text
return app.send_static_file('index.html')
from flask import Blueprint
from flask_restplus import Api
from .annotations import api as ns_annotations
from .categories import api as ns_categories
from .annotator import api as ns_annotator
from .datasets import api as ns_datasets
from .exports import api as ns_exports
from .images import api as ns_images
from .models import api as ns_models
from .users import api as ns_users
from .admin import api as ns_admin
from .tasks import api as ns_tasks
from .undo import api as ns_undo
from .info import api as ns_info
from config import Config
# Create /api/ space
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(
blueprint,
title=Config.NAME,
version=Config.VERSION,
)
# Remove default namespace
api.namespaces.pop(0)
# Setup API namespaces
api.add_namespace(ns_info)
api.add_namespace(ns_users)
api.add_namespace(ns_images)
api.add_namespace(ns_annotations)
api.add_namespace(ns_categories)
api.add_namespace(ns_datasets)
api.add_namespace(ns_exports)
api.add_namespace(ns_tasks)
api.add_namespace(ns_undo)
api.add_namespace(ns_models)
api.add_namespace(ns_admin)
api.add_namespace(ns_annotator)
from flask_login import login_required, current_user
from flask_restplus import Namespace, Resource, reqparse
from werkzeug.security import generate_password_hash
from database import UserModel
from ..util.query_util import fix_ids
api = Namespace('admin', description='Admin related operations')
users = reqparse.RequestParser()
users.add_argument('limit', type=int, default=50)
users.add_argument('page', type=int, default=1)
create_user = reqparse.RequestParser()
create_user.add_argument('name', default="", location='json')
create_user.add_argument('password', default="", location='json')
register = reqparse.RequestParser()
register.add_argument('username', required=True, location='json')
register.add_argument('password', required=True, location='json')
register.add_argument('email', location='json')
register.add_argument('name', location='json')
register.add_argument('isAdmin', type=bool, default=False, location='json')
@api.route('/users')
class Users(Resource):
@api.expect(users)
@login_required
def get(self):
""" Get list of all users """
if not current_user.is_admin:
return {"success": False, "message": "Access denied"}, 401
args = users.parse_args()
per_page = args['limit']
page = args['page']-1
user_model = UserModel.objects
total = user_model.count()
pages = int(total/per_page) + 1
user_model = user_model.skip(page*per_page).limit(per_page).exclude("preferences", "password")
return {
"total": total,
"pages": pages,
"page": page,
"per_page": per_page,
"users": fix_ids(user_model.all())
}
@api.route('/user/')
class User(Resource):
@login_required
@api.expect(register)
def post(self):
""" Create a new user """
if not current_user.is_admin:
return {"success": False, "message": "Access denied"}, 401
args = register.parse_args()
username = args.get('username')
if UserModel.objects(username__iexact=username).first():
return {'success': False, 'message': 'Username already exists.'}, 400
user = UserModel()
user.username = args.get('username')
user.password = generate_password_hash(args.get('password'), method='sha256')
user.name = args.get('name', "")
user.email = args.get('email', "")
user.is_admin = args.get('isAdmin', False)
user.save()
user_json = fix_ids(user)
del user_json['password']
return {'success': True, 'user': user_json}
@api.route('/user/<string:username>')
class Username(Resource):
@login_required
def get(self, username):
""" Get a users """
if not current_user.is_admin:
return {"success": False, "message": "Access denied"}, 401
user = UserModel.objects(username__iexact=username).first()
if user is None:
return {"success": False, "message": "User not found"}, 400
return fix_ids(user)
@api.expect(create_user)
@login_required
def patch(self, username):
""" Edit a user """
if not current_user.is_admin:
return {"success": False, "message": "Access denied"}, 401
user = UserModel.objects(username__iexact=username).first()
if user is None:
return {"success": False, "message": "User not found"}, 400
args = create_user.parse_args()
name = args.get('name')
if len(name) > 0:
user.name = name
password = args.get('password')
if len(password) > 0:
user.password = generate_password_hash(password, method='sha256')
user.save()
return fix_ids(user)
@login_required
def delete(self, username):
""" Delete a user """
if not current_user.is_admin:
return {"success": False, "message": "Access denied"}, 401
user = UserModel.objects(username__iexact=username).first()
if user is None:
return {"success": False, "message": "User not found"}, 400
user.delete()
return {"success": True}
from flask_restplus import Namespace, Resource, reqparse
from flask_login import login_required, current_user
from database import AnnotationModel
from ..util import query_util
import datetime
import logging
logger = logging.getLogger('gunicorn.error')
api = Namespace('annotation', description='Annotation related operations')
create_annotation = reqparse.RequestParser()
create_annotation.add_argument(
'image_id', type=int, required=True, location='json')
create_annotation.add_argument('category_id', type=int, location='json')
create_annotation.add_argument('isbbox', type=bool, location='json')
create_annotation.add_argument('metadata', type=dict, location='json')
create_annotation.add_argument('segmentation', type=list, location='json')
create_annotation.add_argument('keypoints', type=list, location='json')
create_annotation.add_argument('color', location='json')
update_annotation = reqparse.RequestParser()
update_annotation.add_argument('category_id', type=int, location='json')
@api.route('/')
class Annotation(Resource):
@login_required
def get(self):
""" Returns all annotations """
return query_util.fix_ids(current_user.annotations.exclude("paper_object").all())
@api.expect(create_annotation)
@login_required
def post(self):
""" Creates an annotation """
args = create_annotation.parse_args()
image_id = args.get('image_id')
category_id = args.get('category_id')
isbbox = args.get('isbbox')
metadata = args.get('metadata', {})
segmentation = args.get('segmentation', [])
keypoints = args.get('keypoints', [])
image = current_user.images.filter(id=image_id, deleted=False).first()
if image is None:
return {"message": "Invalid image id"}, 400
logger.info(
f'{current_user.username} has created an annotation for image {image_id} with {isbbox}')
logger.info(
f'{current_user.username} has created an annotation for image {image_id}')
try:
annotation = AnnotationModel(
image_id=image_id,
category_id=category_id,
metadata=metadata,
segmentation=segmentation,
keypoints=keypoints,
isbbox=isbbox
)
annotation.save()
except (ValueError, TypeError) as e:
return {'message': str(e)}, 400
return query_util.fix_ids(annotation)
@api.route('/<int:annotation_id>')
class AnnotationId(Resource):
@login_required
def get(self, annotation_id):
""" Returns annotation by ID """
annotation = current_user.annotations.filter(id=annotation_id).first()
if annotation is None:
return {"message": "Invalid annotation id"}, 400
return query_util.fix_ids(annotation)
@login_required
def delete(self, annotation_id):
""" Deletes an annotation by ID """
annotation = current_user.annotations.filter(id=annotation_id).first()
if annotation is None:
return {"message": "Invalid annotation id"}, 400
image = current_user.images.filter(
id=annotation.image_id, deleted=False).first()
image.flag_thumbnail()
annotation.update(set__deleted=True,
set__deleted_date=datetime.datetime.now())
return {'success': True}
@api.expect(update_annotation)
@login_required
def put(self, annotation_id):
""" Updates an annotation by ID """
annotation = current_user.annotations.filter(id=annotation_id).first()
if annotation is None:
return { "message": "Invalid annotation id" }, 400
args = update_annotation.parse_args()
new_category_id = args.get('category_id')
annotation.update(category_id=new_category_id)
logger.info(
f'{current_user.username} has updated category for annotation (id: {annotation.id})'
)
newAnnotation = current_user.annotations.filter(id=annotation_id).first()
return query_util.fix_ids(newAnnotation)
# @api.route('/<int:annotation_id>/mask')
# class AnnotationMask(Resource):
# def get(self, annotation_id):
# """ Returns the binary mask of an annotation """
# return query_util.fix_ids(AnnotationModel.objects(id=annotation_id).first())
import datetime
from flask_restplus import Namespace, Resource
from flask_login import login_required, current_user
from flask import request
from ..util import query_util, coco_util, profile, thumbnails
from config import Config
from database import (
ImageModel,
CategoryModel,
AnnotationModel,
SessionEvent
)
api = Namespace('annotator', description='Annotator related operations')
@api.route('/data')
class AnnotatorData(Resource):
@profile
@login_required
def post(self):
"""
Called when saving data from the annotator client
"""
data = request.get_json(force=True)
image = data.get('image')
dataset = data.get('dataset')
image_id = image.get('id')
image_model = ImageModel.objects(id=image_id).first()
if image_model is None:
return {'success': False, 'message': 'Image does not exist'}, 400
# Check if current user can access dataset
db_dataset = current_user.datasets.filter(id=image_model.dataset_id).first()
if dataset is None or db_dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}, 400
db_dataset.update(annotate_url=dataset.get('annotate_url', ''))
categories = CategoryModel.objects.all()
annotations = AnnotationModel.objects(image_id=image_id)
current_user.update(preferences=data.get('user', {}))
annotated = False
num_annotations = 0
# Iterate every category passed in the data
for category in data.get('categories', []):
category_id = category.get('id')
# Find corresponding category object in the database
db_category = categories.filter(id=category_id).first()
if db_category is None:
continue
category_update = {'color': category.get('color')}
if current_user.can_edit(db_category):
category_update['keypoint_edges'] = category.get('keypoint_edges', [])
category_update['keypoint_labels'] = category.get('keypoint_labels', [])
category_update['keypoint_colors'] = category.get('keypoint_colors', [])
db_category.update(**category_update)
# Iterate every annotation from the data annotations
for annotation in category.get('annotations', []):
counted = False
# Find corresponding annotation object in database
annotation_id = annotation.get('id')
db_annotation = annotations.filter(id=annotation_id).first()
if db_annotation is None:
continue
# Paper.js objects are complex, so they are not always passed. Therefore the
# annotation is updated twice: once here, and again below if the paper.js data exists.
# Update annotation in database
sessions = []
total_time = 0
for session in annotation.get('sessions', []):
date = datetime.datetime.fromtimestamp(int(session.get('start')) / 1e3)
model = SessionEvent(
user=current_user.username,
created_at=date,
milliseconds=session.get('milliseconds'),
tools_used=session.get('tools')
)
total_time += session.get('milliseconds')
sessions.append(model)
keypoints = annotation.get('keypoints', [])
if keypoints:
counted = True
db_annotation.update(
add_to_set__events=sessions,
inc__milliseconds=total_time,
set__isbbox=annotation.get('isbbox', False),
set__keypoints=keypoints,
set__metadata=annotation.get('metadata'),
set__color=annotation.get('color')
)
paperjs_object = annotation.get('compoundPath', [])
# Update paperjs if it exists
if len(paperjs_object) == 2:
width = db_annotation.width
height = db_annotation.height
# Generate coco formatted segmentation data
segmentation, area, bbox = coco_util.\
paperjs_to_coco(width, height, paperjs_object)
db_annotation.update(
set__segmentation=segmentation,
set__area=area,
set__isbbox=annotation.get('isbbox', False),
set__bbox=bbox,
set__paper_object=paperjs_object,
)
if area > 0:
counted = True
if counted:
num_annotations += 1
image_model.update(
set__metadata=image.get('metadata', {}),
set__annotated=(num_annotations > 0),
set__category_ids=image.get('category_ids', []),
set__regenerate_thumbnail=True,
set__num_annotations=num_annotations
)
thumbnails.generate_thumbnail(image_model)
return {"success": True}
@api.route('/data/<int:image_id>')
class AnnotatorId(Resource):
@profile
@login_required
def get(self, image_id):
""" Called when loading from the annotator client """
image = ImageModel.objects(id=image_id)\
.exclude('events').first()
if image is None:
return {'success': False, 'message': 'Could not load image'}, 400
dataset = current_user.datasets.filter(id=image.dataset_id).first()
if dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}, 400
categories = CategoryModel.objects(deleted=False)\
.in_bulk(dataset.categories).items()
# Get next and previous image
images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
pre = images.filter(file_name__lt=image.file_name).order_by('-file_name').first()
nex = images.filter(file_name__gt=image.file_name).order_by('file_name').first()
preferences = {}
if not Config.LOGIN_DISABLED:
preferences = current_user.preferences
# Generate data about the image to return to client
data = {
'image': query_util.fix_ids(image),
'categories': [],
'dataset': query_util.fix_ids(dataset),
'preferences': preferences,
'permissions': {
'dataset': dataset.permissions(current_user),
'image': image.permissions(current_user)
}
}
data['image']['previous'] = pre.id if pre else None
data['image']['next'] = nex.id if nex else None
# Optimized query: fetch all annotations of this image once, then group them by category.
all_annotations = AnnotationModel.objects(image_id=image_id, deleted=False).exclude('events').all()
for category in categories:
category = query_util.fix_ids(category[1])
category_id = category.get('id')
annotations = []
for annotation in all_annotations:
if annotation['category_id'] == category_id:
annotations.append(query_util.fix_ids(annotation))
category['show'] = True
category['visualize'] = False
category['annotations'] = [] if annotations is None else annotations
data.get('categories').append(category)
return data
from flask_restplus import Namespace, Resource, reqparse
from flask_login import login_required, current_user
from mongoengine.errors import NotUniqueError
from ..util.pagination_util import Pagination
from ..util import query_util
from database import CategoryModel, AnnotationModel
import datetime
api = Namespace('category', description='Category related operations')
create_category = reqparse.RequestParser()
create_category.add_argument('name', required=True, location='json')
create_category.add_argument('supercategory', location='json')
create_category.add_argument('color', location='json')
create_category.add_argument('metadata', type=dict, location='json')
create_category.add_argument(
'keypoint_edges', type=list, default=[], location='json')
create_category.add_argument(
'keypoint_labels', type=list, default=[], location='json')
create_category.add_argument(
'keypoint_colors', type=list, default=[], location='json')
update_category = reqparse.RequestParser()
update_category.add_argument('name', required=True, location='json')
update_category.add_argument('supercategory', location='json')
update_category.add_argument('color', location='json')
update_category.add_argument('metadata', type=dict, location='json')
update_category.add_argument('keypoint_edges', type=list, location='json')
update_category.add_argument('keypoint_labels', type=list, location='json')
update_category.add_argument('keypoint_colors', type=list, location='json')
page_data = reqparse.RequestParser()
page_data.add_argument('page', default=1, type=int)
page_data.add_argument('limit', default=20, type=int)
@api.route('/')
class Category(Resource):
@login_required
def get(self):
""" Returns all categories """
return query_util.fix_ids(current_user.categories.all())
@api.expect(create_category)
@login_required
def post(self):
""" Creates a category """
args = create_category.parse_args()
name = args.get('name')
supercategory = args.get('supercategory')
metadata = args.get('metadata', {})
color = args.get('color')
keypoint_edges = args.get('keypoint_edges')
keypoint_labels = args.get('keypoint_labels')
keypoint_colors = args.get('keypoint_colors')
try:
category = CategoryModel(
name=name,
supercategory=supercategory,
color=color,
metadata=metadata,
keypoint_edges=keypoint_edges,
keypoint_labels=keypoint_labels,
keypoint_colors=keypoint_colors,
)
category.save()
except NotUniqueError as e:
return {'message': 'Category already exists. Check the undo tab to fully delete the category.'}, 400
return query_util.fix_ids(category)
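The keypoint fields are what make this endpoint useful for keypoint datasets. A hypothetical request creating a category with a small skeleton (placeholder names, colors and URL; /api prefix assumed):
import requests

session = requests.Session()  # assumes an authenticated session, as shown earlier
resp = session.post("http://localhost:5000/api/category/", json={
    "name": "gauge",
    "supercategory": "instrument",
    "color": "#e17055",
    "keypoint_labels": ["min", "max", "needle_tip", "center"],
    "keypoint_edges": [[3, 4]],   # 1-based indices into keypoint_labels (COCO skeleton convention)
    "keypoint_colors": ["#0984e3", "#00b894", "#d63031", "#fdcb6e"],
})
print(resp.json())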
@api.route('/<int:category_id>')
class CategoryId(Resource):
@login_required
def get(self, category_id):
""" Returns a category by ID """
category = current_user.categories.filter(id=category_id).first()
if category is None:
return {'success': False}, 400
return query_util.fix_ids(category)
@login_required
def delete(self, category_id):
""" Deletes a category by ID """
category = current_user.categories.filter(id=category_id).first()
if category is None:
return {"message": "Invalid image id"}, 400
if not current_user.can_delete(category):
return {"message": "You do not have permission to delete this category"}, 403
category.update(set__deleted=True,
set__deleted_date=datetime.datetime.now())
return {'success': True}
@api.expect(update_category)
@login_required
def put(self, category_id):
""" Updates a category name by ID """
category = current_user.categories.filter(id=category_id).first()
# check if the id exists
if category is None:
return {"message": "Invalid category id"}, 400
args = update_category.parse_args()
name = args.get('name')
supercategory = args.get('supercategory', category.supercategory)
color = args.get('color', category.color)
metadata = args.get('metadata', category.metadata)
keypoint_edges = args.get('keypoint_edges', category.keypoint_edges)
keypoint_labels = args.get('keypoint_labels', category.keypoint_labels)
keypoint_colors = args.get('keypoint_colors', category.keypoint_colors)
# check if there is anything to update
if category.name == name \
and category.supercategory == supercategory \
and category.color == color \
and category.keypoint_edges == keypoint_edges \
and category.keypoint_labels == keypoint_labels \
and category.keypoint_colors == keypoint_colors:
return {"message": "Nothing to update"}, 200
# check if the name is empty
if not name:
return {"message": "Invalid category name to update"}, 400
# update name of the category
# check if the name to update already exists in the db
# @ToDo: Is it necessary to allow equal category names among different creators?
category.name = name
category.supercategory = supercategory
category.color = color
category.keypoint_edges = keypoint_edges
category.keypoint_labels = keypoint_labels
category.keypoint_colors = keypoint_colors
try:
category.update(
name=category.name,
supercategory=category.supercategory,
color=category.color,
metadata=category.metadata,
keypoint_edges=category.keypoint_edges,
keypoint_labels=category.keypoint_labels,
keypoint_colors=category.keypoint_colors,
)
except NotUniqueError:
# NotUniqueError is raised only when the name already exists and the creator is the same
return {"message": "Category '" + name + "' already exists"}, 400
return {"success": True}
@api.route('/data')
class CategoriesData(Resource):
@api.expect(page_data)
@login_required
def get(self):
""" Endpoint called by category viewer client """
args = page_data.parse_args()
limit = args['limit']
page = args['page']
categories = current_user.categories.filter(deleted=False)
pagination = Pagination(categories.count(), limit, page)
categories = query_util.fix_ids(
categories[pagination.start:pagination.end])
for category in categories:
category['numberAnnotations'] = AnnotationModel.objects(
deleted=False, category_id=category.get('id')).count()
return {
"pagination": pagination.export(),
"page": page,
"categories": categories
}
from flask import send_file
from flask_restplus import Namespace, Resource, reqparse
from flask_login import login_required, current_user
import datetime
from ..util import query_util
from database import (
ExportModel,
DatasetModel,
fix_ids
)
api = Namespace('export', description='Export related operations')
@api.route('/<int:export_id>')
class DatasetExports(Resource):
@login_required
def get(self, export_id):
""" Returns exports """
export = ExportModel.objects(id=export_id).first()
if export is None:
return {"message": "Invalid export ID"}, 400
dataset = current_user.datasets.filter(id=export.dataset_id).first()
if dataset is None:
return {"message": "Invalid dataset ID"}, 400
time_delta = datetime.datetime.utcnow() - export.created_at
d = fix_ids(export)
d['ago'] = query_util.td_format(time_delta)
return d
@login_required
def delete(self, export_id):
""" Returns exports """
export = ExportModel.objects(id=export_id).first()
if export is None:
return {"message": "Invalid export ID"}, 400
dataset = current_user.datasets.filter(id=export.dataset_id).first()
if dataset is None:
return {"message": "Invalid dataset ID"}, 400
export.delete()
return {'success': True}
@api.route('/<int:export_id>/download')
class DatasetExportDownload(Resource):
@login_required
def get(self, export_id):
""" Returns exports """
export = ExportModel.objects(id=export_id).first()
if export is None:
return {"message": "Invalid export ID"}, 400
dataset = current_user.datasets.filter(id=export.dataset_id).first()
if dataset is None:
return {"message": "Invalid dataset ID"}, 400
if not current_user.can_download(dataset):
return {"message": "You do not have permission to download the dataset's annotations"}, 403
return send_file(export.path, attachment_filename=f"{dataset.name}-{'-'.join(export.tags)}.json", as_attachment=True)
from flask_restplus import Namespace, Resource, reqparse
from flask_login import login_required, current_user
from werkzeug.datastructures import FileStorage
from flask import send_file
from mongoengine.errors import NotUniqueError
from ..util import query_util, coco_util
from database import (
ImageModel,
DatasetModel,
AnnotationModel
)
from PIL import Image
import datetime
import os
import io
api = Namespace('image', description='Image related operations')
image_all = reqparse.RequestParser()
image_all.add_argument('fields', required=False, type=str)
image_all.add_argument('page', default=1, type=int)
image_all.add_argument('per_page', default=50, type=int, required=False)
image_upload = reqparse.RequestParser()
image_upload.add_argument('image', location='files',
type=FileStorage, required=True,
help='PNG or JPG file')
image_upload.add_argument('dataset_id', required=True, type=int,
help='Id of dataset to insert image into')
image_download = reqparse.RequestParser()
image_download.add_argument('asAttachment', type=bool, default=False)
image_download.add_argument('thumbnail', type=bool, default=False)
image_download.add_argument('width', type=int)
image_download.add_argument('height', type=int)
copy_annotations = reqparse.RequestParser()
copy_annotations.add_argument('category_ids', location='json', type=list,
required=False, default=None, help='Categories to copy')
@api.route('/')
class Images(Resource):
@api.expect(image_all)
@login_required
def get(self):
""" Returns all images """
args = image_all.parse_args()
per_page = args['per_page']
page = args['page']-1
fields = args.get('fields', '')
images = current_user.images.filter(deleted=False)
total = images.count()
pages = int(total/per_page) + 1
images = images.skip(page*per_page).limit(per_page)
if fields:
images = images.only(*fields.split(','))
return {
"total": total,
"pages": pages,
"page": page,
"fields": fields,
"per_page": per_page,
"images": query_util.fix_ids(images.all())
}
@api.expect(image_upload)
@login_required
def post(self):
""" Creates an image """
args = image_upload.parse_args()
image = args['image']
dataset_id = args['dataset_id']
try:
dataset = DatasetModel.objects.get(id=dataset_id)
except DatasetModel.DoesNotExist:
return {'message': 'dataset does not exist'}, 400
directory = dataset.directory
path = os.path.join(directory, image.filename)
if os.path.exists(path):
return {'message': 'file already exists'}, 400
pil_image = Image.open(io.BytesIO(image.read()))
pil_image.save(path)
image.close()
pil_image.close()
try:
db_image = ImageModel.create_from_path(path, dataset_id).save()
except NotUniqueError:
db_image = ImageModel.objects.get(path=path)
return db_image.id
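A hypothetical upload matching the image_upload parser above: a multipart form with an 'image' file part and a 'dataset_id' field. It assumes the dataset and its directory already exist and that the API is mounted under /api.
import requests

session = requests.Session()  # authenticated session assumed
with open("frame_0001.jpg", "rb") as f:
    resp = session.post(
        "http://localhost:5000/api/image/",
        files={"image": ("frame_0001.jpg", f, "image/jpeg")},
        data={"dataset_id": 1},
    )
print(resp.json())  # id of the newly created (or already existing) image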
@api.route('/<int:image_id>')
class ImageId(Resource):
@api.expect(image_download)
@login_required
def get(self, image_id):
""" Returns category by ID """
args = image_download.parse_args()
as_attachment = args.get('asAttachment')
thumbnail = args.get('thumbnail')
image = current_user.images.filter(id=image_id, deleted=False).first()
if image is None:
return {'success': False}, 400
width = args.get('width')
height = args.get('height')
if not width:
width = image.width
if not height:
height = image.height
pil_image = image.open_thumbnail() if thumbnail else Image.open(image.path)
pil_image.thumbnail((width, height), Image.ANTIALIAS)
image_io = io.BytesIO()
pil_image = pil_image.convert("RGB")
pil_image.save(image_io, "JPEG", quality=90)
image_io.seek(0)
return send_file(image_io, attachment_filename=image.file_name, as_attachment=as_attachment)
@login_required
def delete(self, image_id):
""" Deletes an image by ID """
image = current_user.images.filter(id=image_id, deleted=False).first()
if image is None:
return {"message": "Invalid image id"}, 400
if not current_user.can_delete(image):
return {"message": "You do not have permission to download the image"}, 403
image.update(set__deleted=True, set__deleted_date=datetime.datetime.now())
return {"success": True}
@api.route('/copy/<int:from_id>/<int:to_id>/annotations')
class ImageCopyAnnotations(Resource):
@api.expect(copy_annotations)
@login_required
def post(self, from_id, to_id):
args = copy_annotations.parse_args()
category_ids = args.get('category_ids')
image_from = current_user.images.filter(id=from_id).first()
image_to = current_user.images.filter(id=to_id).first()
if image_from is None or image_to is None:
return {'success': False, 'message': 'Invalid image ids'}, 400
if image_from == image_to:
return {'success': False, 'message': 'Cannot copy self'}, 400
if image_from.width != image_to.width or image_from.height != image_to.height:
return {'success': False, 'message': 'Image sizes do not match'}, 400
if category_ids is None:
category_ids = DatasetModel.objects(id=image_from.dataset_id).first().categories
query = AnnotationModel.objects(
image_id=image_from.id,
category_id__in=category_ids,
deleted=False
)
return {'annotations_created': image_to.copy_annotations(query)}
@api.route('/<int:image_id>/coco')
class ImageCoco(Resource):
@login_required
def get(self, image_id):
""" Returns coco of image and annotations """
image = current_user.images.filter(id=image_id).exclude('deleted_date').first()
if image is None:
return {"message": "Invalid image ID"}, 400
if not current_user.can_download(image):
return {"message": "You do not have permission to download the images's annotations"}, 403
return coco_util.get_image_coco(image_id)
from flask_restplus import Namespace, Resource
from workers.tasks import long_task
from config import Config
from database import UserModel, TaskModel
api = Namespace('info', description='Software related operations')
@api.route('/')
class Info(Resource):
def get(self):
""" Returns information about current version """
return {
"name": "COCO Annotator",
"author": "Justin Brooks",
"demo": "https://annotator.justinbrooks.ca/",
"repo": "https://github.com/jsbroks/coco-annotator",
"git": {
"tag": Config.VERSION
},
"login_enabled": not Config.LOGIN_DISABLED,
"total_users": UserModel.objects.count(),
"allow_registration": Config.ALLOW_REGISTRATION
}
@api.route('/long_task')
class TaskTest(Resource):
def get(self):
""" Returns information about current version """
task_model = TaskModel(group="test", name="Testing Celery")
task_model.save()
task = long_task.delay(20, task_model.id)
return {'id': task.id, 'state': task.state}
from flask_restplus import Namespace, Resource, reqparse
from werkzeug.datastructures import FileStorage
from imantics import Mask
from flask_login import login_required
from config import Config
from PIL import Image
from database import ImageModel
import os
import logging
logger = logging.getLogger('gunicorn.error')
MASKRCNN_LOADED = os.path.isfile(Config.MASK_RCNN_FILE)
if MASKRCNN_LOADED:
from ..util.mask_rcnn import model as maskrcnn
else:
logger.warning("MaskRCNN model is disabled.")
DEXTR_LOADED = os.path.isfile(Config.DEXTR_FILE)
if DEXTR_LOADED:
from ..util.dextr import model as dextr
else:
logger.warning("DEXTR model is disabled.")
api = Namespace('model', description='Model related operations')
image_upload = reqparse.RequestParser()
image_upload.add_argument('image', location='files', type=FileStorage, required=True, help='Image')
dextr_args = reqparse.RequestParser()
dextr_args.add_argument('points', location='json', type=list, required=True)
dextr_args.add_argument('padding', location='json', type=int, default=50)
dextr_args.add_argument('threshold', location='json', type=int, default=80)
@api.route('/dextr/<int:image_id>')
class Dextr(Resource):
@login_required
@api.expect(dextr_args)
def post(self, image_id):
""" COCO data test """
if not DEXTR_LOADED:
return {"disabled": True, "message": "DEXTR is disabled"}, 400
args = dextr_args.parse_args()
points = args.get('points')
# padding = args.get('padding')
# threshold = args.get('threshold')
if len(points) != 4:
return {"message": "Invalid points entered"}, 400
image_model = ImageModel.objects(id=image_id).first()
if not image_model:
return {"message": "Invalid image ID"}, 400
image = Image.open(image_model.path)
result = dextr.predict_mask(image, points)
return { "segmentaiton": Mask(result).polygons().segmentation }
@api.route('/maskrcnn')
class MaskRCNN(Resource):
@login_required
@api.expect(image_upload)
def post(self):
""" COCO data test """
if not MASKRCNN_LOADED:
return {"disabled": True, "coco": {}}
args = image_upload.parse_args()
im = Image.open(args.get('image'))
coco = maskrcnn.detect(im)
return {"coco": coco}
from flask_restplus import Namespace, Resource
from flask_login import login_required
from ..util import query_util
from database import TaskModel
api = Namespace('tasks', description='Task related operations')
@api.route('/')
class Task(Resource):
@login_required
def get(self):
""" Returns all tasks """
query = TaskModel.objects.only(
'group', 'id', 'name', 'completed', 'progress',
'priority', 'creator', 'desciption', 'errors',
'warnings'
).all()
return query_util.fix_ids(query)
@api.route('/<int:task_id>')
class TaskId(Resource):
@login_required
def delete(self, task_id):
""" Deletes task """
task = TaskModel.objects(id=task_id).first()
if task is None:
return {"message": "Invalid task id"}, 400
if not task.completed:
return {"message": "Task is not completed"}, 400
task.delete()
return {"success": True}
@api.route('/<int:task_id>/logs')
class TaskLogs(Resource):
@login_required
def get(self, task_id):
""" Deletes task """
task = TaskModel.objects(id=task_id).first()
if task is None:
return {"message": "Invalid task id"}, 400
return {'logs': task.logs}
from flask_restplus import Namespace, Resource, reqparse
from flask_login import login_required
import os
import shutil
import datetime
from database import (
ImageModel,
DatasetModel,
CategoryModel,
AnnotationModel
)
api = Namespace('undo', description='Undo related operations')
model_list = reqparse.RequestParser()
model_list.add_argument('type', type=str, location='args', default="all")
model_list.add_argument('limit', type=int, location='args', default=50)
model_data = reqparse.RequestParser()
model_data.add_argument('id', type=int, required=True)
model_data.add_argument('instance', required=True)
models = [
(CategoryModel, "category"),
(AnnotationModel, "annotation"),
(ImageModel, "image"),
(DatasetModel, "dataset")
]
@api.route('/list/')
class UndoList(Resource):
@api.expect(model_list)
@login_required
def get(self):
""" Returns all partially delete models """
args = model_list.parse_args()
model_type = args['type']
n = max(1, min(args['limit'], 1000))
data = []
for model in models:
if model_type == "all" or model_type == model[1]:
data.extend(model_undo(model[0], model[1], limit=n))
data.sort(key=lambda item: item['date'], reverse=True)
for model in data:
model['date'] = str(model['date'])
if len(data) > n:
data = data[:n]
return data
@api.route('/')
class Undo(Resource):
@api.expect(model_data)
@login_required
def post(self):
""" Undo a partial delete give id and instance """
args = model_data.parse_args()
model_id = args['id']
instance = args['instance']
model_instance = None
for model in models:
if model[1].lower() == instance:
model_instance = model[0]
if model_instance is None:
return {"message": "Instance not found"}, 400
model_object = model_instance.objects(id=model_id).first()
if model_object is None:
return {"message": "Invalid id"}, 400
model_object.update(set__deleted=False)
return {"success": True}
@api.expect(model_data)
@login_required
def delete(self):
""" Undo a partial delete give id and instance """
args = model_data.parse_args()
model_id = args['id']
instance = args['instance']
model_instance = None
for model in models:
if model[1].lower() == instance:
model_instance = model[0]
if model_instance is None:
return {"message": "Instance not found"}, 400
model_object = model_instance.objects(id=model_id).first()
if model_object is None:
return {"message": "Invalid id"}, 400
if isinstance(model_object, ImageModel):
if os.path.isfile(model_object.path):
os.remove(model_object.path)
if isinstance(model_object, DatasetModel):
if os.path.isdir(model_object.directory):
shutil.rmtree(model_object.directory)
model_object.delete()
return {"success": True}
def model_undo(model_instance, instance_name, limit=50):
models = model_instance.objects(deleted=True).order_by('-deleted_date').limit(limit)
new_models = []
for model in models:
if model.deleted_date is None:
continue
name = model.name if hasattr(model, 'name') else '-'
name = model.file_name if hasattr(model, 'file_name') and name == '-' else name
time_delta = datetime.datetime.now() - model.deleted_date
new_model = {
'id': model.id,
'name': '-' if name is None else name,
'instance': instance_name,
'ago': td_format(time_delta),
'date': model.deleted_date
}
new_models.append(new_model)
return new_models
def td_format(td_object):
seconds = int(td_object.total_seconds())
periods = [
('year', 60*60*24*365),
('month', 60*60*24*30),
('day', 60*60*24),
('hour', 60*60),
('minute', 60),
('second', 1)
]
strings = []
for period_name, period_seconds in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
has_s = 's' if period_value > 1 else ''
strings.append("%s %s%s" % (period_value, period_name, has_s))
break
return ", ".join(strings)
from flask_login import login_user, login_required, logout_user, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from flask_restplus import Namespace, Resource, reqparse
from database import UserModel
from config import Config
from ..util.query_util import fix_ids
import logging
logger = logging.getLogger('gunicorn.error')
api = Namespace('user', description='User related operations')
register = reqparse.RequestParser()
register.add_argument('username', required=True, location='json')
register.add_argument('password', required=True, location='json')
register.add_argument('email', location='json')
register.add_argument('name', location='json')
login = reqparse.RequestParser()
login.add_argument('password', required=True, location='json')
login.add_argument('username', required=True, location='json')
set_password = reqparse.RequestParser()
set_password.add_argument('password', required=True, location='json')
set_password.add_argument('new_password', required=True, location='json')
@api.route('/')
class User(Resource):
@login_required
def get(self):
""" Get information of current user """
if Config.LOGIN_DISABLED:
return current_user.to_json()
user_json = fix_ids(current_user)
del user_json['password']
return {'user': user_json}
@api.route('/password')
class UserPassword(Resource):
@login_required
@api.expect(set_password)
def post(self):
""" Set password of current user """
args = set_password.parse_args()
if check_password_hash(current_user.password, args.get('password')):
current_user.update(password=generate_password_hash(args.get('new_password'), method='sha256'), new=False)
return {'success': True}
return {'success': False, 'message': 'Password does not match current password'}, 400
@api.route('/register')
class UserRegister(Resource):
@api.expect(register)
def post(self):
""" Creates user """
users = UserModel.objects.count()
if not Config.ALLOW_REGISTRATION and users != 0:
return {'success': False, 'message': 'Registration of new accounts is disabled.'}, 400
args = register.parse_args()
username = args.get('username')
if UserModel.objects(username__iexact=username).first():
return {'success': False, 'message': 'Username already exists.'}, 400
user = UserModel()
user.username = args.get('username')
user.password = generate_password_hash(args.get('password'), method='sha256')
user.name = args.get('name')
user.email = args.get('email')
if users == 0:
user.is_admin = True
user.save()
login_user(user)
user_json = fix_ids(current_user)
del user_json['password']
return {'success': True, 'user': user_json}
@api.route('/login')
class UserLogin(Resource):
@api.expect(login)
def post(self):
""" Logs user in """
args = login.parse_args()
username = args.get('username')
user = UserModel.objects(username__iexact=username).first()
if user is None:
return {'success': False, 'message': 'Could not authenticate user'}, 400
if check_password_hash(user.password, args.get('password')):
login_user(user)
user_json = fix_ids(current_user)
del user_json['password']
logger.info(f'User {current_user.username} has logged in')
return {'success': True, 'user': user_json}
return {'success': False, 'message': 'Could not authenticate user'}, 400
@api.route('/logout')
class UserLogout(Resource):
@login_required
def get(self):
""" Logs user out """
logger.info(f'User {current_user.username} has logged out')
logout_user()
return {'success': True}
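A hypothetical first-run flow against the user endpoints above; the first registered account is made admin. Credentials and URL are placeholders, and the /api prefix is assumed.
import requests

s = requests.Session()
s.post("http://localhost:5000/api/user/register",
       json={"username": "admin", "password": "secret", "name": "Admin"})
r = s.post("http://localhost:5000/api/user/login",
           json={"username": "admin", "password": "secret"})
print(r.json()["user"]["username"])   # "admin"
s.get("http://localhost:5000/api/user/logout")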
from flask_login import LoginManager, AnonymousUserMixin
from werkzeug.security import check_password_hash
from database import (
UserModel,
DatasetModel,
CategoryModel,
AnnotationModel,
ImageModel
)
login_manager = LoginManager()
class AnonymousUser(AnonymousUserMixin):
@property
def datasets(self):
return DatasetModel.objects
@property
def categories(self):
return CategoryModel.objects
@property
def annotations(self):
return AnnotationModel.objects
@property
def images(self):
return ImageModel.objects
@property
def username(self):
return "anonymous"
@property
def name(self):
return "Anonymous User"
@property
def is_admin(self):
return False
def update(self, *args, **kwargs):
pass
def to_json(self):
return {
"admin": False,
"username": self.username,
"name": self.name,
"is_admin": self.is_admin,
"anonymous": True
}
def can_edit(self, model):
return True
def can_view(self, model):
return True
def can_download(self, model):
return True
def can_delete(self, model):
return True
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return UserModel.objects(id=user_id).first()
@login_manager.unauthorized_handler
def unauthorized():
return {'success': False, 'message': 'Authorization required'}, 401
@login_manager.request_loader
def load_user_from_request(request):
auth = request.authorization
if not auth:
return None
user = UserModel.objects(username__iexact=auth.username).first()
if user and check_password_hash(user.password, auth.password):
# login_user(user)
return user
return None
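Because of the request_loader above, scripted clients can also authenticate each request with HTTP Basic auth instead of a session cookie. A minimal sketch (placeholder credentials, /api prefix assumed):
import requests

resp = requests.get("http://localhost:5000/api/annotation/",
                    auth=("admin", "secret"))
print(resp.status_code)   # 200 if the credentials check out, 401 otherwise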
from config import Config
bind = '0.0.0.0:5000'
backlog = 2048
workers = 1
worker_class = 'eventlet'
worker_connections = 1000
timeout = 30
keepalive = 2
reload = Config.DEBUG
preload = Config.PRELOAD
errorlog = '-'
loglevel = Config.LOG_LEVEL
accesslog = None
import functools
import time
from flask import session
from flask_socketio import (
SocketIO,
disconnect,
join_room,
leave_room,
emit
)
from flask_login import current_user
from database import ImageModel, SessionEvent
from config import Config
import logging
logger = logging.getLogger('gunicorn.error')
socketio = SocketIO()
def authenticated_only(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if current_user.is_authenticated or Config.LOGIN_DISABLED:
return f(*args, **kwargs)
else:
disconnect()
return wrapped
@socketio.on('annotation')
@authenticated_only
def annotation(data):
emit('annotation', data, broadcast=True)
@socketio.on('annotating')
@authenticated_only
def annotating(data):
"""
Socket for handling image locking and time logging
"""
image_id = data.get('image_id')
active = data.get('active')
image = ImageModel.objects(id=image_id).first()
if image is None:
# invalid image ID
return
emit('annotating', {
'image_id': image_id,
'active': active,
'username': current_user.username
}, broadcast=True, include_self=False)
if active:
logger.info(f'{current_user.username} has started annotating image {image_id}')
# Remove user from previous room
previous = session.get('annotating')
if previous is not None:
leave_room(previous)
previous_image = ImageModel.objects(id=previous).first()
if previous_image is not None:
start = session.get('annotating_time', time.time())
event = SessionEvent.create(start, current_user)
previous_image.add_event(event)
previous_image.update(
pull__annotating=current_user.username
)
emit('annotating', {
'image_id': previous,
'active': False,
'username': current_user.username
}, broadcast=True, include_self=False)
join_room(image_id)
session['annotating'] = image_id
session['annotating_time'] = time.time()
image.update(add_to_set__annotating=current_user.username)
else:
leave_room(image_id)
start = session.get('annotating_time', time.time())
event = SessionEvent.create(start, current_user)
image.add_event(event)
image.update(
pull__annotating=current_user.username
)
session['annotating'] = None
session['annotating_time'] = None
@socketio.on('connect')
def connect():
logger.info(f'Socket connection created with {current_user.username}')
@socketio.on('disconnect')
def on_disconnect():
if current_user.is_authenticated:
logger.info(f'Socket connection has been disconnected with {current_user.username}')
image_id = session.get('annotating')
# Remove user from room
if image_id is not None:
image = ImageModel.objects(id=image_id).first()
if image is not None:
start = session.get('annotating_time', time.time())
event = SessionEvent.create(start, current_user)
image.add_event(event)
image.update(
pull__annotating=current_user.username
)
emit('annotating', {
'image_id': image_id,
'active': False,
'username': current_user.username
}, broadcast=True, include_self=False)
import time
import logging
def profile(func):
def wrap(*args, **kwargs):
started_at = time.time()
result = func(*args, **kwargs)
diff = time.time() - started_at
if isinstance(result, dict):
result['time_ms'] = int(diff * 1000)
return result
return wrap
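A small usage sketch for the profile decorator above, assuming it and time are imported from this module: when the wrapped callable returns a dict, a 'time_ms' entry is added to it.
@profile
def slow_handler():
    time.sleep(0.2)
    return {"success": True}

print(slow_handler())   # e.g. {'success': True, 'time_ms': 200}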
from dextr import DEXTR
from config import Config
model = DEXTR(nb_classes=1, resnet_layers=101, input_shape=(512, 512), weights_path=Config.DEXTR_FILE,
num_input_channels=4, classifier='psp', sigmoid=True)
from config import Config as AnnotatorConfig
from skimage.transform import resize
import imantics as im
from keras.preprocessing.image import img_to_array
from mrcnn.config import Config
import mrcnn.model as modellib
import logging
logger = logging.getLogger('gunicorn.error')
MODEL_DIR = "/workspace/models"
COCO_MODEL_PATH = AnnotatorConfig.MASK_RCNN_FILE
CLASS_NAMES = AnnotatorConfig.MASK_RCNN_CLASSES.split(',')
class CocoConfig(Config):
"""
Configuration for COCO Dataset.
"""
NAME = "coco"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = len(CLASS_NAMES)
class MaskRCNN():
def __init__(self):
self.config = CocoConfig()
self.model = modellib.MaskRCNN(
mode="inference",
model_dir=MODEL_DIR,
config=self.config
)
try:
self.model.load_weights(COCO_MODEL_PATH, by_name=True)
self.model.keras_model._make_predict_function()
logger.info(f"Loaded MaskRCNN model: {COCO_MODEL_PATH}")
except Exception:
logger.error("Could not load MaskRCNN model (place 'mask_rcnn_coco.h5' in the models directory)")
self.model = None
def detect(self, image):
if self.model is None:
return {}
image = image.convert('RGB')
width, height = image.size
image.thumbnail((1024, 1024))
image = img_to_array(image)
result = self.model.detect([image])[0]
masks = result.get('masks')
class_ids = result.get('class_ids')
coco_image = im.Image(width=width, height=height)
for i in range(masks.shape[-1]):
mask = resize(masks[..., i], (height, width))
mask = im.Mask(mask)
class_id = class_ids[i]
class_name = CLASS_NAMES[class_id]
category = im.Category(class_name)
coco_image.add(mask, category=category)
return coco_image.coco()
model = MaskRCNN()
class Pagination:
start = 0
end = 0
def __init__(self, length, limit, current_page=1):
self.length = length
self.limit = limit
self.pages = int((length - 1) / limit) + 1
self.current_page = current_page
self.calculate_start_end(current_page)
def calculate_start_end(self, current_page):
self.current_page = current_page
if current_page > self.pages:
self.current_page = self.pages
if current_page < 1:
current_page = 1
self.start = (current_page - 1) * self.limit
self.end = self.start + self.limit
if self.length < self.end:
self.end = self.length
def export(self):
return {
"start": self.start,
"end": self.end,
"pages": self.pages,
"page": self.current_page,
"total": self.length,
"showing": self.end - self.start
}
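A quick usage sketch of the Pagination helper above: 45 items, 20 per page, viewing page 2.
p = Pagination(45, 20, current_page=2)
print(p.start, p.end)   # 20 40
print(p.export())       # {'start': 20, 'end': 40, 'pages': 3, 'page': 2, 'total': 45, 'showing': 20}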
import json
def fix_ids(objs):
objects_list = json.loads(objs.to_json().replace('\"_id\"', '\"id\"'))
return objects_list
def td_format(td_object):
seconds = int(td_object.total_seconds())
periods = [
('year', 60*60*24*365),
('month', 60*60*24*30),
('day', 60*60*24),
('hour', 60*60),
('minute', 60),
('second', 1)
]
strings = []
for period_name, period_seconds in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
has_s = 's' if period_value > 1 else ''
strings.append("%s %s%s" % (period_value, period_name, has_s))
break
return ", ".join(strings)
from database import ImageModel
def generate_thumbnails():
PREFIX = "[Thumbnails]"
print(f'{PREFIX} Queueing thumbnail regeneration for images with stale thumbnails', flush=True)
for image in ImageModel.objects(regenerate_thumbnail=True).all():
generate_thumbnail(image)
def generate_thumbnail(image):
from workers.tasks import thumbnail_generate_single_image
thumbnail_generate_single_image.delay(image.id)
import subprocess
import requests
COMMITS = "https://api.github.com/repos/jsbroks/coco-annotator/commits/{}"
COMPARE = "https://api.github.com/repos/jsbroks/coco-annotator/compare/{}...{}"
CURRENT_VERSION = ""
LATEST_VERSION = ""
def get_tag():
result = subprocess.run(["git", "describe", "--abbrev=0", "--tags"], stdout=subprocess.PIPE)
return str(result.stdout.decode("utf-8")).strip()
def get_current():
result = subprocess.run(["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE)
return str(result.stdout.decode("utf-8")).strip()
def get_branch():
result = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=subprocess.PIPE)
return str(result.stdout.decode("utf-8")).strip()
class VersionControl:
def __init__(self):
self.valid = True
self.branch = get_branch()
self.current_version = get_current()
self.tag = get_tag()
self.latest_version = self.get_latest()
self.commits_behind = self.get_commits_behind()
def is_latest(self):
if len(self.current_version) > 0 and len(self.latest_version):
return self.current_version == self.latest_version
return False
def get_latest(self):
r = requests.get(COMMITS.format(self.branch))
if r.status_code != requests.codes.ok:
self.valid = False
return ""
return r.json().get('sha')
def get_commits_behind(self):
if self.current_version == self.latest_version or not self.valid:
return 0
r = requests.get(COMPARE.format(self.latest_version, self.current_version))
if r.status_code != requests.codes.ok:
self.valid = False
return 0
return r.json().get('behind_by')
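Usage sketch for the version helper above; it shells out to git and calls the GitHub API, so it needs a git checkout and network access.
vc = VersionControl()
print(vc.branch, vc.tag)
print("up to date" if vc.is_latest() else f"{vc.commits_behind} commits behind")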
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from config import Config
from database import ImageModel
from .util.thumbnails import generate_thumbnail
import re
class ImageFolderHandler(FileSystemEventHandler):
PREFIX = "[File Watcher]"
def __init__(self, pattern=None):
self.pattern = pattern or ImageModel.PATTERN
def on_any_event(self, event):
path = event.dest_path if event.event_type == "moved" else event.src_path
if event.is_directory:
# Listen to directory events as some file systems don't generate
# per-file `deleted` events when moving/deleting directories
if event.event_type == 'deleted':
self._log(f'Deleting images from database {path}')
ImageModel.objects(path=re.compile('^' + re.escape(path))).delete()
return
if (
# check if it's a hidden file
bool(re.search(r'\/\..*?\/', path))
or not path.lower().endswith(self.pattern)
):
return
self._log(f'File event "{event.event_type}" for {path}')
image = ImageModel.objects(path=event.src_path).first()
if image is None and event.event_type != 'deleted':
self._log(f'Adding new file to database: {path}')
image = ImageModel.create_from_path(path).save()
generate_thumbnail(image)
elif event.event_type == 'moved':
self._log(f'Moving image from {event.src_path} to {path}')
image.update(path=path)
generate_thumbnail(image)
elif event.event_type == 'deleted':
self._log(f'Deleting image from database {path}')
ImageModel.objects(path=path).delete()
def _log(self, message):
print(f'{self.PREFIX} {message}', flush=True)
def run_watcher():
observer = Observer()
observer.schedule(ImageFolderHandler(), Config.DATASET_DIRECTORY, recursive=True)
observer.start()
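run_watcher starts the Observer in a background thread and returns immediately, so a standalone script has to keep the main thread alive itself. A minimal sketch, assuming Config.DATASET_DIRECTORY exists:
import time

run_watcher()
while True:
    time.sleep(1)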
FROM jsbroks/coco-annotator:python-env
WORKDIR /workspace/
# Install python package dependencies
COPY ./backend/ /workspace/
EXPOSE 5555
CMD celery -A workers worker -l info
from celery import Celery
from config import Config
from database import connect_mongo
connect_mongo('Celery_Worker')
celery = Celery(
Config.NAME,
backend=Config.CELERY_RESULT_BACKEND,
broker=Config.CELERY_BROKER_URL
)
celery.autodiscover_tasks(['workers.tasks'])
if __name__ == '__main__':
celery.start()
from config import Config
from flask_socketio import SocketIO
def create_socket():
return SocketIO(message_queue=Config.CELERY_BROKER_URL)
from .data import *
from .test import *
from .scan import *
from .thumbnails import *
from celery import shared_task
from database import TaskModel
from ..socket import create_socket
@shared_task
def long_task(n, task_id):
task = TaskModel.objects.get(id=task_id)
task.update(status="PROGRESS")
socketio = create_socket()
print(f"This task will take {n} seconds")
import time
for i in range(n):
print(i)
time.sleep(1)
socketio.emit('test', i)
return n
__all__ = ["long_task"]
from database import ImageModel
from celery import task
@task
def thumbnail_generate_single_image(image_id):
image = ImageModel.objects(id=image_id).first()
image.thumbnail()
image.flag_thumbnail(flag=False)
__all__ = ["thumbnail_generate_single_image"]
docker build -f ./backend/Dockerfile . -t jsbroks/coco-annotator:python-env --no-cache
docker build . -t annotator_webclient_gpu --no-cache
docker build -f ./backend/workers/Dockerfile . -t annotator_workers_gpu --no-cache
module.exports = {
presets: ["@vue/app"]
};