Skip to content
Snippets Groups Projects
Commit 59fbb5c1 authored by simon wu's avatar simon wu Committed by Kai Chen
Browse files

Fixing build-errors on Windows (#969)

* Fixing build-errors on Windows (add some compile args, which are default on Linux)

* Fixing build-errors on Windows

* minor formatting

* Fixing errors on windows

1. Adding a 'cxx' key for extra_compile_args
2. Adding a type-cast to long for the first parameter of THCCeilDiv (facebookresearch/maskrcnn-benchmark#409)
3. Ignoring the resource limit setting on Windows

* update the order of import statements
parent cb0dd8ee
No related branches found
No related tags found
No related merge requests found
import platform
from functools import partial
from mmcv.runner import get_dist_info
......@@ -6,10 +7,11 @@ from torch.utils.data import DataLoader
from .sampler import GroupSampler, DistributedGroupSampler, DistributedSampler
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    # Raise the soft limit on open file descriptors so that DataLoader
    # worker processes do not fail with "Too many open files".  The
    # `resource` module does not exist on Windows, hence the platform
    # guard and the local import.
    import resource
    _soft, _hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # setrlimit raises ValueError when the requested soft limit exceeds
    # the hard limit, so clamp to the hard limit when it is finite.
    if _hard == resource.RLIM_INFINITY:
        _soft = 4096
    else:
        _soft = min(4096, _hard)
    resource.setrlimit(resource.RLIMIT_NOFILE, (_soft, _hard))
def build_dataloader(dataset,
......
......@@ -109,7 +109,7 @@ at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
auto losses = at::empty({num_samples, logits.size(1)}, logits.options());
auto losses_size = num_samples * logits.size(1);
dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L));
dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L));
dim3 block(512);
if (losses.numel() == 0) {
......@@ -147,7 +147,7 @@ at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
auto d_logits = at::zeros({num_samples, num_classes}, logits.options());
auto d_logits_size = num_samples * logits.size(1);
dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L));
dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L));
dim3 block(512);
if (d_logits.numel() == 0) {
......
import os
import platform
import subprocess
import time
from setuptools import Extension, find_packages, setup
def make_cuda_ext(name, module, sources):
    """Build a :class:`CUDAExtension` for the given module.

    Args:
        name (str): Extension name (the final dotted component).
        module (str): Dotted package path the extension lives in.
        sources (list[str]): Source paths relative to the package dir.

    Returns:
        CUDAExtension: Extension object consumed by setuptools/torch.
    """
    return CUDAExtension(
        name='{}.{}'.format(module, name),
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        extra_compile_args={
            # The 'cxx' key must be present (even if empty) so the dict
            # form of extra_compile_args is accepted on every platform.
            'cxx': [],
            # Disable half-precision operator overloads that break the
            # MSVC build; these flags are harmless elsewhere.
            'nvcc': [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        })
def make_cython_ext(name, module, sources):
    """Build and cythonize a C++ :class:`Extension`.

    Args:
        name (str): Extension name (the final dotted component).
        module (str): Dotted package path the extension lives in.
        sources (list[str]): Source paths relative to the package dir.

    Returns:
        Extension: The cythonized extension object.
    """
    # MSVC does not understand the GCC-style warning flags, so only
    # pass them on non-Windows platforms; None lets setuptools use its
    # platform defaults.
    extra_compile_args = None
    if platform.system() != 'Windows':
        extra_compile_args = {
            'cxx': ['-Wno-unused-function', '-Wno-write-strings']
        }
    extension = Extension(
        '{}.{}'.format(module, name),
        [os.path.join(*module.split('.'), p) for p in sources],
        include_dirs=[np.get_include()],
        language='c++',
        extra_compile_args=extra_compile_args)
    # cythonize returns a list; unpack the single generated extension.
    extension, = cythonize(extension)
    return extension
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment