Commit 59fbb5c1 authored by simon wu, committed by Kai Chen

Fixing build-errors on Windows (#969)

* Fixing build errors on Windows (add some compile args that are the defaults on Linux)

* Fixing build-errors on Windows

* minor formatting

* Fixing errors on Windows

1. Adding a 'cxx' key to extra_compile_args
2. Adding a type cast to long for the first parameter of THCCeilDiv (facebookresearch/maskrcnn-benchmark#409)
3. Ignoring the resource limit setting on Windows

* update the order of import statements
parent cb0dd8ee
+import platform
 from functools import partial
 
 from mmcv.runner import get_dist_info
@@ -6,10 +7,11 @@ from torch.utils.data import DataLoader
 
 from .sampler import GroupSampler, DistributedGroupSampler, DistributedSampler
 
-# https://github.com/pytorch/pytorch/issues/973
-import resource
-rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
-resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
+if platform.system() != 'Windows':
+    # https://github.com/pytorch/pytorch/issues/973
+    import resource
+    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
 
 
 def build_dataloader(dataset,
...
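For context, the resource module is POSIX-only, so importing it on Windows raises ImportError; the guard above simply skips the file-descriptor limit bump there. A minimal, self-contained sketch of that guarded block (illustrative, not part of the commit):

import platform

if platform.system() != 'Windows':
    # 'resource' is a POSIX-only module; importing it on Windows raises
    # ImportError, so this whole block is skipped on that platform.
    import resource

    # Raise the soft limit on open file descriptors so many DataLoader worker
    # processes can keep dataset files open at once (pytorch/pytorch#973).
    soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, hard_limit))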
@@ -109,7 +109,7 @@ at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
   auto losses = at::empty({num_samples, logits.size(1)}, logits.options());
   auto losses_size = num_samples * logits.size(1);
-  dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L));
+  dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L));
   dim3 block(512);
 
   if (losses.numel() == 0) {
@@ -147,7 +147,7 @@ at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
   auto d_logits = at::zeros({num_samples, num_classes}, logits.options());
   auto d_logits_size = num_samples * logits.size(1);
-  dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L));
+  dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L));
   dim3 block(512);
 
   if (d_logits.numel() == 0) {
...
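A note on the cast: losses_size and d_logits_size are int64_t (the type returned by logits.size(1)), while the literals 512L and 4096L are long. THCCeilDiv and std::min, as called here, deduce a single type from both arguments, so the call only compiles when the two types match. On LP64 Linux, long is 8 bytes and matches int64_t; on LLP64 Windows, long is 4 bytes, the types differ, and the build fails until one argument is cast (hence facebookresearch/maskrcnn-benchmark#409). A small illustrative check of that platform difference, not part of the commit:

import ctypes
import platform

# Width of the C 'long' type versus a 64-bit integer on the current platform.
print(platform.system(),
      ctypes.sizeof(ctypes.c_long),      # 8 on LP64 Linux/macOS, 4 on LLP64 Windows
      ctypes.sizeof(ctypes.c_longlong))  # 8 everywhere; int64_t is 'long long' on Windows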
 import os
+import platform
 import subprocess
 import time
 
 from setuptools import Extension, find_packages, setup
@@ -88,18 +89,30 @@ def make_cuda_ext(name, module, sources):
     return CUDAExtension(
         name='{}.{}'.format(module, name),
-        sources=[os.path.join(*module.split('.'), p) for p in sources])
+        sources=[os.path.join(*module.split('.'), p) for p in sources],
+        extra_compile_args={
+            'cxx': [],
+            'nvcc': [
+                '-D__CUDA_NO_HALF_OPERATORS__',
+                '-D__CUDA_NO_HALF_CONVERSIONS__',
+                '-D__CUDA_NO_HALF2_OPERATORS__',
+            ]
+        })
 
 
 def make_cython_ext(name, module, sources):
+    extra_compile_args = None
+    if platform.system() != 'Windows':
+        extra_compile_args = {
+            'cxx': ['-Wno-unused-function', '-Wno-write-strings']
+        }
+
     extension = Extension(
         '{}.{}'.format(module, name),
         [os.path.join(*module.split('.'), p) for p in sources],
         include_dirs=[np.get_include()],
         language='c++',
-        extra_compile_args={
-            'cxx': ['-Wno-unused-function', '-Wno-write-strings']
-        })
+        extra_compile_args=extra_compile_args)
     extension, = cythonize(extension)
     return extension
...
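For reference, a short sketch (not the project's actual setup.py) of the two compile-argument patterns introduced above; cython_compile_args is a hypothetical name used only for illustration. The '-Wno-*' warning flags are GCC/Clang options that MSVC does not accept, which is presumably why they are only passed on non-Windows hosts, and when extra_compile_args is given to torch's CUDAExtension as a dict, the build step looks up flags per compiler, so an empty 'cxx' entry accompanies the 'nvcc' one:

import platform

def cython_compile_args():
    # Hypothetical helper mirroring make_cython_ext above: skip GCC/Clang-style
    # warning suppressions on Windows, where MSVC would reject them.
    if platform.system() == 'Windows':
        return None
    return {'cxx': ['-Wno-unused-function', '-Wno-write-strings']}

# Per-compiler flags for the CUDA extensions: 'cxx' applies to host C++ sources
# (left empty here) and 'nvcc' to .cu sources.
cuda_compile_args = {
    'cxx': [],
    'nvcc': [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
}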