diff --git a/BatchRenderer/Renderers/PBRT/RtbPBRTCloudRenderer.m b/BatchRenderer/Renderers/PBRT/RtbPBRTCloudRenderer.m
new file mode 100644
index 0000000000000000000000000000000000000000..59227c2083aebd6647eaafd2b6d25321da562867
--- /dev/null
+++ b/BatchRenderer/Renderers/PBRT/RtbPBRTCloudRenderer.m
@@ -0,0 +1,156 @@
+classdef RtbPBRTCloudRenderer < RtbRenderer
+    %% Implementation for rendering with PBRT on Google Cloud.
+
+    properties
+        % RenderToolbox4 options struct, see rtbDefaultHints()
+        hints = [];
+
+        % pbrt info struct
+        pbrt;
+
+        % Google Cloud Storage access token
+        tokenPath;
+        token;
+
+        % Cloud folder
+        cloudFolder;
+        dataFileName = 'data.zip';
+
+        % Local folder in the docker image
+        localFolder = 'WorkDir';
+
+        % where to write output files
+        outputFolder;
+
+        % where to put scenes before rendering
+        workingFolder;
+    end
+
+    methods
+        function obj = RtbPBRTCloudRenderer(hints)
+            obj.hints = rtbDefaultHints(hints);
+            obj.pbrt = getpref('PBRT');
+            obj.tokenPath = hints.tokenPath;
+            obj.token = loadjson(hints.tokenPath);
+            obj.outputFolder = rtbWorkingFolder( ...
+                'folderName', 'renderings', ...
+                'rendererSpecific', true, ...
+                'hints', obj.hints);
+            obj.workingFolder = rtbWorkingFolder('hints', obj.hints);
+        end
+
+        function info = versionInfo(obj)
+            try
+                info = dir(obj.pbrt.executable);
+            catch err
+                info = err;
+            end
+        end
+
+        function [status, result, image, sampling, imageName] = render(obj, nativeScene)
+            % look carefully for the file
+            [~, imageName] = fileparts(nativeScene);
+            fileInfo = rtbResolveFilePath(nativeScene, obj.workingFolder);
+            nativeScene = fileInfo.absolutePath;
+
+            % build the input and output paths used inside the docker image
+            workFolder = fullfile('/',obj.localFolder);
+            outFile = fullfile(workFolder,'renderings','PBRTCloud', [imageName '.dat']);
+            inFile = fullfile(workFolder,[imageName '.pbrt']);
+
+            % Work around different new-line conventions across systems
+            % when passing the token as JSON. This is a hack and should
+            % be cleaned up.
+            key = obj.token.private_key;
+            key = regexprep(key,'\n','\\n');
+
+            token = obj.token;
+            token.private_key = key;
+
+            processedToken = savejson('',token);
+            processedToken = regexprep(processedToken,'\\\\','\');
+
+            %{
+            command = sprintf('docker run -e http_proxy=http://10.102.1.10:8000 -e https_proxy=https://10.102.1.10:8000 --rm -ti hblasins/syncandrender ./syncAndRender.sh ''%s'' "%s" "%s" "%s" "%s" "%s"',...
+                processedToken,...
+                obj.cloudFolder,...
+                workFolder,...
+                obj.dataFileName,...
+                inFile,...
+                outFile);
+            %}
+
+            % First delete all jobs that have completed successfully.
+            % Otherwise the nodes accumulate data and fill up their
+            % disk space.
+            kubeCmd = 'kubectl delete job $(kubectl get jobs | awk ''$3=="1" {print $1}'')';
+            [status, result] = system(kubeCmd);
+
+            % Build a job name Kubernetes will accept: lower case,
+            % with '_', '.', and '-' stripped, truncated to 63 characters.
+            jobName = lower([obj.hints.recipeName imageName]);
+            jobName(jobName == '_' | jobName == '.' | jobName == '-') = '';
+            jobName = jobName(1:min(63,length(jobName)));
+
+            % Kubernetes does not allow two jobs with the same name,
+            % so delete any old job with this name first.
+            kubeCmd = sprintf('kubectl delete job %s',jobName);
+            [status, result] = system(kubeCmd);
+
+            % Now submit the new rendering job.
+            kubeCmd = sprintf('kubectl run %s --image=%s --restart=OnFailure --limits cpu=31000m -- ./syncAndRender.sh ''%s'' "%s" "%s" "%s" "%s" "%s"',...
+                jobName,...
+                obj.pbrt.dockerImage,...
+                processedToken,...
+                obj.cloudFolder,...
+                workFolder,...
+                obj.dataFileName,...
+                inFile,...
+                outFile);
+
+            [status, result] = system(kubeCmd);
+
+            %{
+            % run in a container or locally
+            if rtbDockerExists()
+                [status, result] = rtbRunDocker(renderCommand, ...
+                    obj.pbrt.dockerImage, ...
+                    'workingFolder', obj.workingFolder, ...
+                    'volumes', {obj.workingFolder, rtbRoot()}, ...
+                    'hints', obj.hints);
+            elseif rtbKubernetesExists()
+                [status, result] = rtbRunKubernetes(renderCommand, ...
+                    obj.pbrt.kubernetesPodSelector, ...
+                    'workingFolder', obj.workingFolder, ...
+                    'hints', obj.hints);
+            else
+                pbrtPath = fileparts(fullfile(obj.pbrt.app, obj.pbrt.executable));
+                renderCommand = sprintf('%s="%s" "%s%s"%s', ...
+                    obj.pbrt.libraryPathName, ...
+                    obj.pbrt.libraryPath, ...
+                    pbrtPath, ...
+                    filesep(), ...
+                    renderCommand);
+                [status, result] = rtbRunCommand(renderCommand, 'hints', obj.hints);
+            end
+            %}
+
+            if status ~= 0
+                error('RtbPbrtRenderer:pbrtError', '%s', result);
+            end
+
+            sampling = obj.pbrt.S;
+            image = [];
+        end
+
+        function [radianceImage, scaleFactor] = toRadiance(obj, image, sampling, nativeScene)
+            scaleFactor = obj.pbrt.radiometricScaleFactor;
+            radianceImage = image .* scaleFactor;
+        end
+    end
+end
diff --git a/BatchRenderer/RtbCloud.m b/BatchRenderer/RtbCloud.m
new file mode 100644
index 0000000000000000000000000000000000000000..d35f67603b1ee37f45b47630cdecc7cf320db3a9
--- /dev/null
+++ b/BatchRenderer/RtbCloud.m
@@ -0,0 +1,147 @@
+classdef RtbCloud
+    %% Implementation for rendering with PBRT on Google Cloud.
+
+    properties
+        % RenderToolbox4 options struct, see rtbDefaultHints()
+        hints = [];
+
+        % pbrt info struct
+        pbrt;
+
+        % Google Cloud Storage access token
+        tokenPath;
+        token;
+
+        % Cloud folder
+        cloudFolder;
+        dataFileName = 'data.zip';
+
+        % Local folder in the docker image
+        localFolder = 'WorkDir';
+
+        % where to write output files
+        outputFolder;
+
+        % where to put scenes before rendering
+        workingFolder;
+    end
+
+    methods
+        function obj = RtbCloud(hints)
+            obj.hints = rtbDefaultHints(hints);
+            obj.pbrt = getpref('PBRT');
+            obj.tokenPath = hints.tokenPath;
+            obj.token = loadjson(hints.tokenPath);
+            obj.outputFolder = rtbWorkingFolder( ...
+                'folderName', 'renderings', ...
+                'rendererSpecific', true, ...
+                'hints', obj.hints);
+            obj.workingFolder = rtbWorkingFolder('hints', obj.hints);
+        end
+
+        function info = versionInfo(obj)
+            try
+                info = dir(obj.pbrt.executable);
+            catch err
+                info = err;
+            end
+        end
+
+        function [status, result, image, sampling, imageName] = render(obj, nativeScene)
+            % look carefully for the file
+            [~, imageName] = fileparts(nativeScene);
+            fileInfo = rtbResolveFilePath(nativeScene, obj.workingFolder);
+            nativeScene = fileInfo.absolutePath;
+
+            % build the input and output paths used inside the docker image
+            workFolder = fullfile('/',obj.localFolder);
+            outFile = fullfile(workFolder,'renderings','PBRTCloud', [imageName '.dat']);
+            inFile = fullfile(workFolder,[imageName '.pbrt']);
+
+            % Work around different new-line conventions across systems
+            % when passing the token as JSON. This is a hack and should
+            % be cleaned up.
+            key = obj.token.private_key;
+            key = regexprep(key,'\n','\\n');
+
+            token = obj.token;
+            token.private_key = key;
+
+            processedToken = savejson('',token);
+            processedToken = regexprep(processedToken,'\\\\','\');
+
+            %{
+            command = sprintf('docker run -e http_proxy=http://10.102.1.10:8000 -e https_proxy=https://10.102.1.10:8000 --rm -ti hblasins/syncandrender ./syncAndRender.sh ''%s'' "%s" "%s" "%s" "%s" "%s"',...
+                processedToken,...
+                obj.cloudFolder,...
+                workFolder,...
+                obj.dataFileName,...
+                inFile,...
+                outFile);
+            %}
+
+            % Build a job name Kubernetes will accept: lower case,
+            % with '_' replaced by '-'.
+            jobName = lower(imageName);
+            jobName(jobName == '_') = '-';
+
+            % Kubernetes does not allow two jobs with the same name,
+            % so delete any old job with this name first.
+            kubeCmd = sprintf('kubectl delete job %s',jobName);
+            [status, result] = system(kubeCmd);
+
+            % Now submit the new rendering job.
+            kubeCmd = sprintf('kubectl run %s --image=%s --restart=OnFailure --limits cpu=31000m -- ./syncAndRender.sh ''%s'' "%s" "%s" "%s" "%s" "%s"',...
+                jobName,...
+                obj.pbrt.dockerImage,...
+                processedToken,...
+                obj.cloudFolder,...
+                workFolder,...
+                obj.dataFileName,...
+                inFile,...
+                outFile);
+
+            [status, result] = system(kubeCmd);
+
+            %{
+            % run in a container or locally
+            if rtbDockerExists()
+                [status, result] = rtbRunDocker(renderCommand, ...
+                    obj.pbrt.dockerImage, ...
+                    'workingFolder', obj.workingFolder, ...
+                    'volumes', {obj.workingFolder, rtbRoot()}, ...
+                    'hints', obj.hints);
+            elseif rtbKubernetesExists()
+                [status, result] = rtbRunKubernetes(renderCommand, ...
+                    obj.pbrt.kubernetesPodSelector, ...
+                    'workingFolder', obj.workingFolder, ...
+                    'hints', obj.hints);
+            else
+                pbrtPath = fileparts(fullfile(obj.pbrt.app, obj.pbrt.executable));
+                renderCommand = sprintf('%s="%s" "%s%s"%s', ...
+                    obj.pbrt.libraryPathName, ...
+                    obj.pbrt.libraryPath, ...
+                    pbrtPath, ...
+                    filesep(), ...
+                    renderCommand);
+                [status, result] = rtbRunCommand(renderCommand, 'hints', obj.hints);
+            end
+            %}
+
+            if status ~= 0
+                error('RtbPbrtRenderer:pbrtError', '%s', result);
+            end
+
+            sampling = obj.pbrt.S;
+            image = [];
+        end
+
+        function [radianceImage, scaleFactor] = toRadiance(obj, image, sampling, nativeScene)
+            scaleFactor = obj.pbrt.radiometricScaleFactor;
+            radianceImage = image .* scaleFactor;
+        end
+    end
+end
diff --git a/BatchRenderer/rtbMakeSceneFiles.m b/BatchRenderer/rtbMakeSceneFiles.m
index c2160cb18c57ef1640a55b1a6ee9f5c1d1264549..86169bfa3f157ea7fca03177a155a82b2a0ccc2b 100644
--- a/BatchRenderer/rtbMakeSceneFiles.m
+++ b/BatchRenderer/rtbMakeSceneFiles.m
@@ -101,6 +101,15 @@ makeScenesTick = tic();
 if hints.isParallel
     % distributed "parfor" loop
+
+    % Size the parallel pool to the number of CPU cores on this
+    % machine, leaving one core free.
+    [status, nCores] = system('nproc');
+
+    if status == 0
+        cluster = parcluster();
+        cluster.NumWorkers = str2double(nCores) - 1;
+        parpool(cluster,cluster.NumWorkers);
+    end
+
     parfor cc = 1:nConditions
         % choose variable values for this condition
         if isempty(values)
@@ -113,6 +122,9 @@ if hints.isParallel
         nativeScenes{cc} = makeSceneForCondition(strategy, ...
             scene, mappings, cc, names, conditionValues, hints);
     end
+
+    delete(gcp);
+
 else
     % local "for" loop
     for cc = 1:nConditions
diff --git a/Utilities/rtbCloudDownload.m b/Utilities/rtbCloudDownload.m
new file mode 100644
index 0000000000000000000000000000000000000000..ec0501c495e71fc3d5518c182a34a14f9a0345c3
--- /dev/null
+++ b/Utilities/rtbCloudDownload.m
@@ -0,0 +1,62 @@
+function matFileName = rtbCloudDownload( hints )
+
+% Download rendering results from the cloud into the local working directory.
+
+% cmd = sprintf('gcloud auth activate-service-account --key-file=%s',hints.batchRenderStrategy.renderer.tokenPath);
+% system(cmd);
+
+if strcmp(hints.renderer,'PBRTCloud') == 0
+    matFileName = {};
+    return;
+end
+
+sourceDir = fullfile(hints.batchRenderStrategy.renderer.cloudFolder,'renderings',hints.renderer);
+destDir = rtbWorkingFolder('folderName','renderings',...
+    'rendererSpecific',true,...
+    'hints',hints);
+
+% Download data (rendering outputs).
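+% The -x pattern excludes .mat files from the sync; they are re-created
+% locally below from the downloaded .dat files.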
+cmd = sprintf('gsutil rsync -x ".*.mat" %s %s',sourceDir,destDir);
+system(cmd);
+
+%{
+cmd = sprintf('gsutil cp %s/*.dat %s/',sourceDir,destDir);
+system(cmd);
+
+% Download text files
+cmd = sprintf('gsutil cp %s/*.txt %s/',sourceDir,destDir);
+system(cmd);
+%}
+
+% Go over all the .dat files and re-create the associated .mat files.
+fNames = dir(fullfile(destDir,'*.dat'));
+matFileName = cell(length(fNames),1);
+
+for f=1:length(fNames)
+
+    % Load the data that was saved locally before rendering. Everything
+    % is here except for the radiance, which was just copied from the cloud.
+    [~, conditionName] = fileparts(fNames(f).name);
+    matFileName{f} = fullfile(destDir,sprintf('%s.mat',conditionName));
+
+    load(matFileName{f});
+
+    % Read the radiance data
+    outFile = fullfile(destDir,fNames(f).name);
+    S = hints.batchRenderStrategy.renderer.pbrt.S;
+    image = rtbReadDAT(outFile, 'maxPlanes', S(3));
+
+    % Assemble a spectral image
+    [multispectralImage, radiometricScaleFactor] = ...
+        hints.batchRenderStrategy.renderer.toRadiance(image, S, scene);
+
+    save(matFileName{f}, 'multispectralImage', 'S', 'radiometricScaleFactor', ...
+        'hints', 'scene', 'versionInfo', 'commandResult');
+
+end
+
+end
diff --git a/Utilities/rtbCloudUpload.m b/Utilities/rtbCloudUpload.m
new file mode 100644
index 0000000000000000000000000000000000000000..b1afaf9524dd333b612efbc3038f6fc81b844b8f
--- /dev/null
+++ b/Utilities/rtbCloudUpload.m
@@ -0,0 +1,29 @@
+function rtbCloudUpload( hints, nativeSceneFiles )
+
+% Upload all the data from the working directory into the cloud.
+
+% No need to authenticate on the local system
+% cmd = sprintf('gcloud auth activate-service-account --key-file=%s',hints.batchRenderStrategy.renderer.tokenPath);
+% system(cmd);
+
+if strcmp(hints.renderer,'PBRTCloud') == 0
+    return;
+end
+
+fileName = hints.batchRenderStrategy.renderer.dataFileName;
+
+% Build a space-separated list of the scene files plus the resources and
+% scenes folders, then zip them up from inside the working folder.
+allFiles = cell2mat(strcat(nativeSceneFiles,{' '}));
+allFilesAndFolders = sprintf('%s ./resources ./scenes',allFiles);
+
+currentPath = pwd;
+cd(hints.batchRenderStrategy.renderer.workingFolder);
+cmd = sprintf('zip -r %s/%s %s',hints.batchRenderStrategy.renderer.workingFolder,fileName,allFilesAndFolders);
+system(cmd);
+cd(currentPath);
+
+% Copy the zip archive to the cloud folder.
+cmd = sprintf('gsutil cp %s/%s %s/',hints.batchRenderStrategy.renderer.workingFolder,fileName,hints.batchRenderStrategy.renderer.cloudFolder);
+system(cmd);
+
+end