diff --git a/mmdet/datasets/loader/build_loader.py b/mmdet/datasets/loader/build_loader.py
index 8759b0612638479247e1a536eca895224aebcbd0..1843713738ef417b17aa090b2d69562b131a8626 100644
--- a/mmdet/datasets/loader/build_loader.py
+++ b/mmdet/datasets/loader/build_loader.py
@@ -1,3 +1,4 @@
+import platform
 from functools import partial
 
 from mmcv.runner import get_dist_info
@@ -6,10 +7,11 @@ from torch.utils.data import DataLoader
 
 from .sampler import GroupSampler, DistributedGroupSampler, DistributedSampler
 
-# https://github.com/pytorch/pytorch/issues/973
-import resource
-rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
-resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
+if platform.system() != 'Windows':
+    # https://github.com/pytorch/pytorch/issues/973
+    import resource  # Unix-only module; not available on Windows
+    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
 
 
 def build_dataloader(dataset,
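The guard above matters because the resource module is Unix-only, so the previous unconditional import is what broke Windows. A minimal standalone sketch of the same idea, slightly more defensive than the patch in that it never requests more than the hard limit allows (the 4096 figure is just the value the loader uses):

import platform

if platform.system() != 'Windows':
    import resource  # Unix-only
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # Raise the soft open-file limit so many DataLoader workers can keep
    # file handles and sockets open; never request more than the hard limit.
    target = 4096 if hard == resource.RLIM_INFINITY else min(4096, hard)
    resource.setrlimit(resource.RLIMIT_NOFILE, (target, hard))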
diff --git a/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu b/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu
index aa1e4b9d8e311797f1d104cae0cba0873f761fbc..7b9b8050834f7cf64e6e9efaeda7262a00906679 100644
--- a/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu
+++ b/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu
@@ -109,7 +109,7 @@ at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
   auto losses = at::empty({num_samples, logits.size(1)}, logits.options());
   auto losses_size = num_samples * logits.size(1);
 
-  dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L));
+  dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L));  // explicit cast: int64_t vs. long breaks THCCeilDiv's template deduction on MSVC
   dim3 block(512);
 
   if (losses.numel() == 0) {
@@ -147,7 +147,7 @@ at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
   auto d_logits = at::zeros({num_samples, num_classes}, logits.options());
   auto d_logits_size = num_samples * logits.size(1);
 
-  dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L));
+  dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L));
   dim3 block(512);
 
   if (d_logits.numel() == 0) {
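The cast is the whole fix here: losses_size and d_logits_size are 64-bit values (they come from at::Tensor::size()), while 512L and 4096L are long, which MSVC treats as 32-bit, so THCCeilDiv could not deduce a single template type on Windows. The launch geometry itself is unchanged; as a rough Python sketch of that arithmetic with made-up sizes:

def ceil_div(a, b):
    # the rounding-up integer division THCCeilDiv performs
    return (a + b - 1) // b

losses_size = 1000 * 80  # e.g. 1000 samples x 80 classes (illustrative only)
grid = min(ceil_div(losses_size, 512), 4096)  # -> 157 blocks
block = 512  # threads per block, matching dim3 block(512) above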
diff --git a/setup.py b/setup.py
index e40909a171bea871fc7f36ef0be361bd28b23e61..c0920d9bc6179ad43ac34b6d80e3409defbd638d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
 import os
+import platform
 import subprocess
 import time
 from setuptools import Extension, find_packages, setup
@@ -88,18 +89,30 @@ def make_cuda_ext(name, module, sources):
 
     return CUDAExtension(
         name='{}.{}'.format(module, name),
-        sources=[os.path.join(*module.split('.'), p) for p in sources])
+        sources=[os.path.join(*module.split('.'), p) for p in sources],
+        extra_compile_args={
+            'cxx': [],
+            'nvcc': [  # keep CUDA's __half operator overloads out of the build (a common MSVC workaround)
+                '-D__CUDA_NO_HALF_OPERATORS__',
+                '-D__CUDA_NO_HALF_CONVERSIONS__',
+                '-D__CUDA_NO_HALF2_OPERATORS__',
+            ]
+        })
 
 
 def make_cython_ext(name, module, sources):
+    extra_compile_args = None
+    if platform.system() != 'Windows':  # MSVC does not accept these GCC/Clang-style -Wno-* flags
+        extra_compile_args = {
+            'cxx': ['-Wno-unused-function', '-Wno-write-strings']
+        }
+
     extension = Extension(
         '{}.{}'.format(module, name),
         [os.path.join(*module.split('.'), p) for p in sources],
         include_dirs=[np.get_include()],
         language='c++',
-        extra_compile_args={
-            'cxx': ['-Wno-unused-function', '-Wno-write-strings']
-        })
+        extra_compile_args=extra_compile_args)
     extension, = cythonize(extension)
     return extension
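For context, these two helpers feed setup()'s ext_modules list; a minimal usage sketch (module and source names are illustrative, not part of this patch):

ext_modules = [
    make_cython_ext(
        name='soft_nms_cpu',
        module='mmdet.ops.nms',
        sources=['src/soft_nms_cpu.pyx']),
    make_cuda_ext(
        name='sigmoid_focal_loss_cuda',
        module='mmdet.ops.sigmoid_focal_loss',
        sources=['src/sigmoid_focal_loss.cpp',
                 'src/sigmoid_focal_loss_cuda.cu']),
]

With the change, the Cython extension simply omits the GCC/Clang-only -Wno-* warnings on Windows (extra_compile_args=None lets setuptools fall back to its defaults), while the CUDA extension always passes the -D__CUDA_NO_HALF_* defines to nvcc, a common workaround for __half operator conflicts with the host compiler.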