data_5k_5k.h5
data_1k_1k.h5
# torusRecon
h5py==3.11.0
matplotlib==3.8.4
numpy==1.26.4
open3d==0.18.0
scipy==1.13.0
torch==2.2.0
tqdm==4.65.0
"""
作者 ljx
创建一个管道的3D mesh模型文件 ring.obj
2024/4/7
mayble:
pip install scipy
"""
import math
import numpy as np
from scipy.spatial.transform import Rotation as R
import open3d as o3d
# Rotate the model so that direction normal1 is mapped onto normal2.
def rotate_model(vertices, normal1, normal2):
    # Normalize the normals
    normal1 = normal1 / np.linalg.norm(normal1)
    normal2 = normal2 / np.linalg.norm(normal2)
    # Compute the rotation axis and angle; clip the dot product so round-off
    # cannot push it outside acos's domain. The axis is normalized so that the
    # rotation-vector magnitude equals the angle. (The anti-parallel case,
    # where the cross product vanishes, is not handled.)
    axis = np.cross(normal1, normal2)
    angle = math.acos(np.clip(np.dot(normal1, normal2), -1.0, 1.0))
    axis_norm = np.linalg.norm(axis)
    if axis_norm > 1e-12:
        axis = axis / axis_norm
    # Create the rotation from its rotation-vector (axis * angle) form
    rotation = R.from_rotvec(axis * angle)
    # Apply the rotation to each vertex
    rotated_vertices = [rotation.apply(vertex) for vertex in vertices]
    return rotated_vertices
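
# Hedged sanity check (added for illustration; not part of the original file):
# rotating the +z axis onto +x should map the vertex (0, 0, 1) to (1, 0, 0).
def _check_rotate_model():
    out = rotate_model([[0.0, 0.0, 1.0]], [0, 0, 1], [1, 0, 0])
    assert np.allclose(out[0], [1.0, 0.0, 0.0], atol=1e-8)
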
def create_ring_obj(r1, r2, theta, phi, center=[0, 0, 0], normal=[0, 0], alphaStep=100, betaStep=50):
    """
    r1         bend (major) radius
    r2         tube (minor) radius
    theta      start angle of the bend
    phi        end angle of the bend
    normal     normal vector in spherical coordinates (theta, phi),
               with theta in [0, pi] and phi in [0, 2*pi]
    center     center of the ring
    alphaStep  number of subdivisions along alpha
    betaStep   number of subdivisions along beta
    Iterate over alpha and beta and generate vertex coordinates:
        x = (r1 + r2 * cos(beta)) * cos(alpha)
        y = (r1 + r2 * cos(beta)) * sin(alpha)
        z = r2 * sin(beta)
    """
    # Convert the spherical-coordinate normal into a Cartesian unit vector.
    normal = [np.sin(normal[0]) * np.cos(normal[1]),
              np.sin(normal[0]) * np.sin(normal[1]),
              np.cos(normal[0])]
    vertices = []
    faces = []
    alphas = np.linspace(theta, phi, alphaStep, endpoint=False)
    betas = np.linspace(0, 2 * np.pi, betaStep, endpoint=False)
    # generate vertices
    for alpha in alphas:
        for beta in betas:
            x = (r1 + r2 * math.cos(beta)) * math.cos(alpha)
            y = (r1 + r2 * math.cos(beta)) * math.sin(alpha)
            z = r2 * math.sin(beta)
            vertices.append([x, y, z])
    # generate faces (quads over the alpha/beta grid)
    for i in range(alphaStep - 1):
        for j in range(betaStep):
            v1 = i * betaStep + j
            v2 = i * betaStep + (j + 1) % betaStep
            v3 = ((i + 1) % alphaStep) * betaStep + (j + 1) % betaStep
            v4 = ((i + 1) % alphaStep) * betaStep + j
            faces.append([v1, v2, v3, v4])
    # rotate the model from +z to the requested normal, then translate to center
    rotated_vertices = rotate_model(vertices, [0, 0, 1], normal)
    vertices = [[v[0] + center[0], v[1] + center[1], v[2] + center[2]] for v in rotated_vertices]
    # split each quad into two triangles for the open3d interface
    triangles = []
    for face in faces:
        triangles.append([face[0], face[1], face[2]])
        triangles.append([face[2], face[3], face[0]])
    mesh = o3d.geometry.TriangleMesh()
    mesh.vertices = o3d.utility.Vector3dVector(vertices)
    mesh.triangles = o3d.utility.Vector3iVector(triangles)
    # visualize
    # o3d.visualization.draw([mesh])
    # export obj
    # o3d.io.write_triangle_mesh("ring.obj", mesh)
    return mesh
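
# Hedged sanity check (added for illustration; not part of the original file):
# with the center at the origin and normal (0, 0) -> +z, every generated
# vertex should satisfy the implicit torus equation
#   (sqrt(x^2 + y^2) - r1)^2 + z^2 = r2^2.
def _check_torus_parametrization(r1=100.0, r2=10.0):
    mesh = create_ring_obj(r1, r2, 0.0, np.pi, center=[0, 0, 0], normal=[0, 0])
    pts = np.asarray(mesh.vertices)
    residual = (np.sqrt(pts[:, 0] ** 2 + pts[:, 1] ** 2) - r1) ** 2 + pts[:, 2] ** 2 - r2 ** 2
    assert np.allclose(residual, 0.0, atol=1e-6)
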
def sample_mesh_nearly(mesh, num_points=1000, radius=0.1):
    """Sample points near the mesh surface: pick a random triangle, draw a
    uniform barycentric point on it, then jitter it along the triangle normal
    by up to +/- radius."""
    mesh.compute_triangle_normals()
    points = []
    num_triangles = len(mesh.triangles)
    for _ in range(num_points):
        triangle_idx = np.random.randint(0, num_triangles)
        # uniform barycentric coordinates (u, v, w) on the triangle
        u = np.random.uniform(0, 1)
        v = np.random.uniform(0, 1)
        if u + v > 1:
            u = 1 - u
            v = 1 - v
        w = 1 - u - v
        triangle = mesh.triangles[triangle_idx]
        point = (u * mesh.vertices[triangle[0]]
                 + v * mesh.vertices[triangle[1]]
                 + w * mesh.vertices[triangle[2]])
        # offset along the triangle normal to simulate measurement noise
        point += np.random.uniform(-radius, radius) * mesh.triangle_normals[triangle_idx]
        points.append(point)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.asarray(points))
    return pcd
if __name__ == '__main__':
    mesh = create_ring_obj(100, 10, np.pi / 4, 2 * np.pi / 3, [0.7, 0.3, 0.5], [0, 0])
    pcd = sample_mesh_nearly(mesh, num_points=500)
    o3d.visualization.draw([pcd])
import h5py
import argparse
import numpy as np
from create_ring import create_ring_obj,sample_mesh_nearly
if __name__ == '__main__':
    data_size = 1000
    num_points = 1000
    X = np.zeros([data_size, num_points, 3])
    y = np.zeros([data_size, 9])
    for i in range(data_size):
        r2 = np.random.uniform(5, 20)
        r1 = r2 + np.random.uniform(10, 100)
        theta = np.random.uniform(0, np.pi)
        # the torus angle should not be too small
        phi = theta + np.random.uniform(np.pi / 6, np.pi)
        normal = [np.random.uniform(0, np.pi), np.random.uniform(0, 2 * np.pi)]
        center = [np.random.uniform(-100, 100), np.random.uniform(-100, 100), np.random.uniform(-100, 100)]
        compact_arg = [r1, r2, theta, phi] + center + normal
        y[i] = np.asarray(compact_arg)
        mesh = create_ring_obj(r1, r2, theta, phi, center, normal)
        pcd = sample_mesh_nearly(mesh, num_points)
        X[i] = np.asarray(pcd.points)
    with h5py.File('data.h5', 'w') as hf:
        hf.create_dataset("X", data=X)
        hf.create_dataset("y", data=y)
import h5py
import numpy as np
import open3d as o3d
import argparse
from create_ring import create_ring_obj
parser = argparse.ArgumentParser()
parser.add_argument("data_path",type=str,help="dataset path")
args = parser.parse_args()
with h5py.File(args.data_path, "r") as hf:
    X = hf["X"][20]
    y = hf["y"][20]
r1 = y[0]
r2 = y[1]
theta = y[2]
phi = y[3]
center = [y[4],y[5],y[6]]
normal = [y[7],y[8]]
mesh = create_ring_obj(r1,r2,theta,phi,center,normal)
mesh.compute_triangle_normals()
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.asarray(X))
o3d.visualization.draw([pcd,mesh])
import torch
import numpy
import h5py
from pathlib import Path
from torch.utils.data import Dataset
class TPCDataset(Dataset):
    """Torus point-cloud dataset backed by an HDF5 file with datasets X and y."""
    def __init__(self, path: Path):
        super().__init__()
        with h5py.File(path) as hf:
            self.X = torch.tensor(hf["X"][:])
            self.y = torch.tensor(hf["y"][:])

    def __len__(self):
        assert len(self.X) == len(self.y)
        return len(self.X)

    def __getitem__(self, index):
        return self.X[index], self.y[index]
import torch
import numpy
import h5py
from torch import nn
import torch.nn.functional as F
from matplotlib import pyplot as plt
from torch.utils.data import Dataset,DataLoader
from pathlib import Path
from Datasets import *
import time
from tqdm import tqdm,trange
SEED = 7777777
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(SEED)
class ToyTorusNet(nn.Module):
    def __init__(self, point_count):
        super().__init__()
        # point_count is accepted for API symmetry; the max-pool over points
        # makes the network independent of the number of input points.
        hidden_size0 = 64
        hidden_size1 = 128
        hidden_size2 = 1024
        self.fc0 = nn.Linear(3, hidden_size0, dtype=torch.float64)
        self.fc1 = nn.Linear(hidden_size0, hidden_size1, dtype=torch.float64)
        self.fc2 = nn.Linear(hidden_size1, hidden_size2, dtype=torch.float64)
        self.fc3 = nn.Linear(hidden_size2, hidden_size1, dtype=torch.float64)
        self.fc4 = nn.Linear(hidden_size1, hidden_size0, dtype=torch.float64)
        self.fc5 = nn.Linear(hidden_size0, 4, dtype=torch.float64)
        self.relu = nn.ReLU()
        self.to(DEVICE)

    def forward(self, x: torch.Tensor):
        # per-point MLP
        x = self.fc0(x)
        x = self.relu(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        # permutation-invariant max-pool over the point dimension
        x, _ = torch.max(x, dim=1)
        # per-cloud MLP head producing 4 plane parameters
        x = self.fc3(x)
        x = self.relu(x)
        x = self.fc4(x)
        x = self.relu(x)
        x = self.fc5(x)
        return x
def SaveModel(model: ToyTorusNet, path: Path):
    torch.save(model.state_dict(), path)

def LoadModel(point_count, path: Path):
    model = ToyTorusNet(point_count)
    model.load_state_dict(torch.load(path))
    return model
def y2params_0(y):
    # Interpret the 4 predicted values as a plane: y[0:3] is an unnormalized
    # normal and y[3] the signed offset of a point on the plane along it.
    # theta = y[0] - torch.pi*torch.floor(y[0]/torch.pi)
    # phi = y[1] - 2*torch.pi*torch.floor(y[1]/(2*torch.pi))
    # normal = torch.tensor([torch.sin(theta)*torch.cos(phi),torch.sin(theta)*torch.sin(phi),torch.cos(theta)]).to(DEVICE)
    norm = torch.norm(y[0:3])
    normal = y[0:3] / norm
    point = normal * y[3]
    return normal, point
def loss_0(X, y):
    # For each cloud: normalize the directions from the predicted plane point
    # to the sample points and penalize their mean projection onto the
    # predicted normal (zero when the plane splits the cloud symmetrically).
    tot_loss = torch.tensor(0, dtype=torch.float64).to(DEVICE)
    batch_size = len(X)
    point_size = len(X[0])
    for i in range(batch_size):
        y_pred = y[i]
        normal, point = y2params_0(y_pred)
        normalized_d = F.normalize(X[i] - point, p=2, dim=1)
        expanded_normal = normal.unsqueeze(0).expand(point_size, -1)
        elementwise_product = torch.mul(normalized_d, expanded_normal)
        tot_loss += torch.abs(torch.sum(elementwise_product)) / point_size
    return tot_loss / batch_size
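
# Hedged illustration (added for clarity; not part of the original script):
# for points lying in the z = 0 plane, predicting exactly that plane
# (normal (0, 0, 1), offset 0) should give a loss close to zero.
def _loss_0_example():
    t = torch.linspace(0, 2 * torch.pi, 32, dtype=torch.float64)
    # one cloud of 32 points on the unit circle in the z = 0 plane
    X = torch.stack([torch.cos(t), torch.sin(t), torch.zeros_like(t)], dim=1).unsqueeze(0).to(DEVICE)
    y = torch.tensor([[0.0, 0.0, 1.0, 0.0]], dtype=torch.float64).to(DEVICE)
    print(loss_0(X, y).item())  # expected to be approximately 0
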
def TrainModel(model: ToyTorusNet, path: Path, epochs=1000):
    dataset = TPCDataset(path)
    train_ratio = 0.8
    train_size = int(train_ratio * len(dataset))
    test_size = len(dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
    train_dataloader = DataLoader(train_dataset, 8, shuffle=True)
    test_dataloader = DataLoader(test_dataset, 8, shuffle=False)
    model.to(DEVICE)
    loss_fn = loss_0
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
    for epoch in range(epochs):
        print(f"Epoch:{epoch}:")
        model.train()
        train_loss = 0
        for X, y in tqdm(train_dataloader, desc="Train"):
            X = X.to(DEVICE)
            y = y.to(DEVICE)
            optimizer.zero_grad()
            y_pred = model(X)
            loss = loss_fn(X, y_pred)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
        train_loss /= len(train_dataloader)
        print(f"TrainLoss:{train_loss:.3f}")
        model.eval()
        with torch.inference_mode():
            test_loss = 0
            for X, y in tqdm(test_dataloader, desc="Test"):
                X = X.to(DEVICE)
                y = y.to(DEVICE)
                y_pred = model(X)
                test_loss += loss_fn(X, y_pred).item()
            test_loss /= len(test_dataloader)
            print(f"TestLoss:{test_loss:.3f}")
    return model
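
# Hedged usage sketch (epoch count and output file name are assumptions;
# "data.h5" matches the output of the generation script):
if __name__ == "__main__":
    model = ToyTorusNet(point_count=1000)
    model = TrainModel(model, Path("data.h5"), epochs=100)
    SaveModel(model, Path("toy_torus_net.pt"))
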
import math
import numpy as np
from scipy.spatial.transform import Rotation as R
import open3d as o3d
# Build a small quad mesh representing the plane through `center` with the
# given normal (used to visualize a predicted plane).
def get_plane(center, normal):
    # Reference normal of the un-rotated quad (+y), and the target normal
    normal1 = np.array([0.0, 1.0, 0.0])
    normal = normal / np.linalg.norm(normal)
    # Compute the rotation axis and angle (dot clipped against round-off);
    # normalize the axis so the rotation-vector magnitude equals the angle.
    axis = np.cross(normal1, normal)
    angle = math.acos(np.clip(np.dot(normal1, normal), -1.0, 1.0))
    axis_norm = np.linalg.norm(axis)
    if axis_norm > 1e-12:
        axis = axis / axis_norm
    rotation = R.from_rotvec(axis * angle)
    # Build a quad in the y = 0 plane, rotate it, then translate it to center
    vertices = [[20, 0, 20], [20, 0, -20], [-20, 0, 20], [-20, 0, -20]]
    rotated_vertices = [rotation.apply(vertex) + np.asarray(center) for vertex in vertices]
    # two triangles covering the quad
    triangles = [[0, 1, 2], [1, 3, 2]]
    mesh = o3d.geometry.TriangleMesh()
    mesh.vertices = o3d.utility.Vector3dVector(np.asarray(rotated_vertices))
    mesh.triangles = o3d.utility.Vector3iVector(triangles)
    return mesh
def get_plane_from_tensor(y):
    y = y.squeeze()
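    # Completion sketch (an assumption, not original code): the 4-value layout
    # is taken to mirror y2params_0 in the training script -- y[0:3] is an
    # unnormalized plane normal and y[3] the signed offset of the plane point
    # along that normal. `y` is expected to be a detached CPU tensor or array.
    y = np.asarray(y)
    normal = y[0:3] / np.linalg.norm(y[0:3])
    center = normal * y[3]
    return get_plane(center, normal)

# Hedged usage example (the parameter values are illustrative assumptions):
# visualize the plane z = 5 (normal +z, offset 5 along that normal).
if __name__ == "__main__":
    y = np.array([0.0, 0.0, 1.0, 5.0])
    plane = get_plane_from_tensor(y)
    o3d.visualization.draw([plane])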