Commit 1b5c991f authored by Kai Chen, committed by GitHub

Update pre-commit hook config and fix styles (#2182)

* update pre-commit hook config

* fix styles conflicting with pre-commit hooks
parent 3839478d
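The updated .pre-commit-config.yaml itself is not included in the hunks shown below; only the resulting style fixes are. The sketch that follows is therefore a hypothetical reconstruction of the kind of hook setup that would produce exactly these rewrites: double-quote-string-fixer for the double-quote to single-quote changes, end-of-file-fixer for the missing trailing newlines in the C++ binding files, fix-encoding-pragma with --remove for the dropped coding headers, and requirements-txt-fixer for the reordered requirements.txt. The repository revisions and the flake8/yapf entries are assumptions, not taken from this commit.

# Hypothetical .pre-commit-config.yaml sketch; revs and linter entries are assumed.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.5.0                        # assumed revision
    hooks:
      - id: trailing-whitespace        # strip trailing spaces
      - id: end-of-file-fixer          # make every file end with a newline
      - id: double-quote-string-fixer  # rewrite "..." string literals to '...'
      - id: fix-encoding-pragma        # drop "# -*- coding: utf-8 -*-" headers
        args: ["--remove"]
      - id: requirements-txt-fixer     # sort requirements.txt entries
  - repo: https://gitlab.com/pycqa/flake8          # assumed linter hook
    rev: 3.7.9
    hooks:
      - id: flake8
  - repo: https://github.com/pre-commit/mirrors-yapf  # assumed formatter hook
    rev: v0.29.0
    hooks:
      - id: yapf

With a config along these lines, running pre-commit run --all-files applies such mechanical rewrites across the whole repository, which is what the file-by-file changes below reflect.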
@@ -17,9 +17,9 @@ class RPNTestMixin(object):
     if sys.version_info >= (3, 7):
         async def async_test_rpn(self, x, img_meta, rpn_test_cfg):
-            sleep_interval = rpn_test_cfg.pop("async_sleep_interval", 0.025)
+            sleep_interval = rpn_test_cfg.pop('async_sleep_interval', 0.025)
             async with completed(
-                    __name__, "rpn_head_forward",
+                    __name__, 'rpn_head_forward',
                     sleep_interval=sleep_interval):
                 rpn_outs = self.rpn_head(x)
@@ -75,10 +75,10 @@ class BBoxTestMixin(object):
                 x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
             if self.with_shared_head:
                 roi_feats = self.shared_head(roi_feats)
-            sleep_interval = rcnn_test_cfg.get("async_sleep_interval", 0.017)
+            sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
             async with completed(
-                    __name__, "bbox_head_forward",
+                    __name__, 'bbox_head_forward',
                     sleep_interval=sleep_interval):
                 cls_score, bbox_pred = self.bbox_head(roi_feats)
@@ -191,7 +191,7 @@ class MaskTestMixin(object):
                 sleep_interval = 0.035
             async with completed(
                     __name__,
-                    "mask_head_forward",
+                    'mask_head_forward',
                     sleep_interval=sleep_interval):
                 mask_pred = self.mask_head(mask_feats)
             segm_result = self.mask_head.get_seg_masks(
@@ -266,7 +266,7 @@ class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,
                                 proposals=None,
                                 rescale=False):
         """Async test without augmentation."""
-        assert self.with_bbox, "Bbox head must be implemented."
+        assert self.with_bbox, 'Bbox head must be implemented.'
         x = self.extract_feat(img)
         if proposals is None:
@@ -294,7 +294,7 @@ class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,
     def simple_test(self, img, img_meta, proposals=None, rescale=False):
         """Test without augmentation."""
-        assert self.with_bbox, "Bbox head must be implemented."
+        assert self.with_bbox, 'Bbox head must be implemented.'
         x = self.extract_feat(img)
@@ -164,7 +164,7 @@ int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight,
   shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, padW,
               dilationH, dilationW, group, deformable_group);
   at::DeviceGuard guard(input.device());
   input = input.contiguous();
   offset = offset.contiguous();
   weight = weight.contiguous();
@@ -386,7 +386,7 @@ int deform_conv_backward_parameters_cuda(
   shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, dW, padH,
               padW, dilationH, dilationW, group, deformable_group);
   at::DeviceGuard guard(input.device());
   input = input.contiguous();
   offset = offset.contiguous();
   gradOutput = gradOutput.contiguous();
@@ -497,7 +497,7 @@ void modulated_deform_conv_cuda_forward(
   AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
   AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
   at::DeviceGuard guard(input.device());
   const int batch = input.size(0);
   const int channels = input.size(1);
   const int height = input.size(2);
@@ -73,4 +73,4 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
         "masked_im2col forward (CUDA)");
   m.def("masked_col2im_forward", &masked_col2im_forward_cuda,
         "masked_col2im forward (CUDA)");
-}
\ No newline at end of file
+}
@@ -14,4 +14,4 @@ at::Tensor nms(const at::Tensor& dets, const float threshold) {
 PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
   m.def("nms", &nms, "non-maximum suppression");
-}
\ No newline at end of file
+}
@@ -58,6 +58,6 @@ def collect_env():
     return env_info
-if __name__ == "__main__":
+if __name__ == '__main__':
     for name, val in collect_env().items():
         print('{}: {}'.format(name, val))
@@ -1,4 +1,3 @@
-# coding: utf-8
 import asyncio
 import contextlib
 import logging
@@ -35,7 +35,7 @@ if sys.version_info >= (3, 7):
         end.synchronize()
         cpu_time = (cpu_end - cpu_start) * 1000
         gpu_time = start.elapsed_time(end)
-        msg = "{} {} cpu_time {:.2f} ms ".format(trace_name, name,
+        msg = '{} {} cpu_time {:.2f} ms '.format(trace_name, name,
                                                  cpu_time)
-        msg += "gpu_time {:.2f} ms stream {}".format(gpu_time, stream)
+        msg += 'gpu_time {:.2f} ms stream {}'.format(gpu_time, stream)
         print(msg, end_stream)
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 """
 This module defines the :class:`NiceRepr` mixin class, which defines a
 ``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
@@ -1,4 +1,4 @@
--r requirements/runtime.txt
+-r requirements/build.txt
 -r requirements/optional.txt
+-r requirements/runtime.txt
 -r requirements/tests.txt
--r requirements/build.txt
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 import os
 import subprocess
 import time
@@ -93,7 +92,7 @@ def make_cuda_ext(name, module, sources):
     define_macros = []
     if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
-        define_macros += [("WITH_CUDA", None)]
+        define_macros += [('WITH_CUDA', None)]
     else:
         raise EnvironmentError('CUDA is required to compile MMDetection!')
@@ -1,4 +1,3 @@
-# coding: utf-8
 import asyncio
 import os
 import shutil
@@ -18,7 +18,7 @@ class AsyncTestCase(asynctest.TestCase):
     use_default_loop = False
     forbid_get_event_loop = True
-    TEST_TIMEOUT = int(os.getenv("ASYNCIO_TEST_TIMEOUT", "30"))
+    TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
     def _run_test_method(self, method):
         result = method()
@@ -33,7 +33,7 @@ class MaskRCNNDetector:
                  model_config,
                  checkpoint=None,
                  streamqueue_size=3,
-                 device="cuda:0"):
+                 device='cuda:0'):
         self.streamqueue_size = streamqueue_size
         self.device = device
@@ -66,13 +66,13 @@ class AsyncInferenceTestCase(AsyncTestCase):
         if not torch.cuda.is_available():
             import pytest
-            pytest.skip("test requires GPU and torch+cuda")
+            pytest.skip('test requires GPU and torch+cuda')
         root_dir = os.path.dirname(os.path.dirname(__name__))
         model_config = os.path.join(root_dir,
-                                    "configs/mask_rcnn_r50_fpn_1x.py")
+                                    'configs/mask_rcnn_r50_fpn_1x.py')
         detector = MaskRCNNDetector(model_config)
         await detector.init()
-        img_path = os.path.join(root_dir, "demo/demo.jpg")
+        img_path = os.path.join(root_dir, 'demo/demo.jpg')
         bboxes, _ = await detector.apredict(img_path)
         self.assertTrue(bboxes)
@@ -196,7 +196,7 @@ def get_distortions_from_file(filename):
 def get_distortions_from_results(eval_output):
     distortions = []
     for i, distortion in enumerate(eval_output):
-        distortions.append(distortion.replace("_", " "))
+        distortions.append(distortion.replace('_', ' '))
     return distortions