5
« on: January 10, 2022, 04:16:54 PM »
Excuse me, everyone! I am writing a Python script to create a 3D model and generate a point cloud and a depth image. I want to define the coordinate system in local coordinates, all in meter units. I also want to auto-detect the marker in the image and then use the marker's coordinates to align the camera. As a final step, I want to export the point cloud in the local coordinate system, also in meter units. How can I write this script? I will show you the code and a figure.
import Metashape
import textwrap
import glob
import os
# --- Project / chunk setup -------------------------------------------------
# Bind the currently open Metashape project. (The original `global doc`
# statement was removed: `global` is a no-op at module scope — it only has
# an effect inside a function body.)
doc = Metashape.app.document
print("Script started")

# Create a fresh chunk to work in and give it a recognizable label.
doc.addChunk()
chunk = doc.chunk
chunk.label = "New Chunk"

# Enable every available GPU: the mask has one bit per detected device.
Metashape.app.gpu_mask = 2 ** len(Metashape.app.enumGPUDevices()) - 1
# When at least one GPU is enabled, keep the CPU out of the GPU-accelerated
# stages (recommended by Agisoft to avoid slowing the GPUs down).
if Metashape.app.gpu_mask:
    Metashape.app.cpu_enable = False
# --- Photo collection ------------------------------------------------------
# Gather every *.jpg from the RawPicture folder under the current working
# directory. The original loop computed an unused `img_name` per file and
# appended in a manual loop; a sorted glob is equivalent and deterministic
# (raw glob order is filesystem-dependent).
image_dir = 'RawPicture'
path_img = os.path.join(os.getcwd(), image_dir)
photo_list = sorted(glob.glob(path_img + '/*.jpg'))

# Alignment limits used by matchPhotos below.
keypoints = 40000  # align photos key point limit
tiepoints = 10000  # align photos tie point limit

# Load the photos into the chunk.
chunk.addPhotos(photo_list)
# --- Photogrammetry pipeline (order matters) -------------------------------
# 1) Detect and match features across photos. downscale=2 is "High" accuracy;
#    generic_preselection speeds up matching; limits come from the constants
#    defined above.
chunk.matchPhotos(downscale=2, generic_preselection=True, filter_mask = False, keypoint_limit = keypoints, tiepoint_limit = tiepoints)
# 2) Estimate camera poses from the matched features.
chunk.alignCameras()
# 3) Refine camera calibration/poses (bundle adjustment).
chunk.optimizeCameras()
# 4) Compute per-image depth maps. downscale=2 is "High" quality;
#    AggressiveFiltering suppresses small depth details/noise.
chunk.buildDepthMaps(downscale=2, filter_mode=Metashape.AggressiveFiltering)
# 5) Build the dense point cloud from the depth maps.
chunk.buildDenseCloud()
# 6) Build a mesh; Arbitrary surface type suits closed objects (not terrain),
#    with interpolation enabled to fill small holes.
chunk.buildModel(surface_type=Metashape.Arbitrary, interpolation=Metashape.EnabledInterpolation)
# --- Depth rendering setup -------------------------------------------------
# chunk.transform.scale converts internal (chunk) units to world units; it is
# unset (falsy) until the chunk has been scaled/georeferenced, so fall back
# to 1 to keep raw chunk units in that case.
if chunk.transform.scale:
    scale = chunk.transform.scale
else:
    scale = 1

# Render a depth map from the viewpoint of one camera.
# NOTE(review): the camera index 4 is hard-coded — confirm this is the
# intended viewpoint. (The original also built an unused `camera_list`
# copy of chunk.cameras; removed as dead code.)
cam = chunk.cameras[4]
depth = chunk.model.renderDepth(cam.transform, cam.sensor.calibration)

# Single-channel float image for metric depths, RGB byte image for the
# grey-scale preview written to disk later.
depth_scaled = Metashape.Image(depth.width, depth.height, " ", "F32")
depth_grey = Metashape.Image(depth.width, depth.height, "RGB", "U8")
# --- Convert rendered depth to metric values and an 8-bit preview ----------
v_min = 10E10
v_max = -10E10
print(" ***started export depth image*********")

# First pass: scale every depth sample to meters (via the chunk scale) and
# track the value range. Pixels with no geometry render as depth 0 and are
# excluded from v_min by the truthiness test.
for y in range(depth.height):
    for x in range(depth.width):
        depth_scaled[x, y] = (depth[x, y][0] * scale, )
        v_max = max(v_max, depth_scaled[x, y][0])
        if depth_scaled[x, y][0]:
            v_min = min(v_min, depth_scaled[x, y][0])

# Fix: guard against a flat depth map (v_max == v_min), which previously
# raised ZeroDivisionError in the mapping below.
crange = (v_max - v_min) or 1

# Second pass: map depths to grey levels (near = bright, far = dark).
# Fix: clamp to [0, 255] — empty (zero-depth) pixels fall outside the
# [v_min, v_max] range of valid geometry and previously mapped above 255.
for y in range(depth.height):
    for x in range(depth.width):
        color = int((v_max - depth_scaled[x, y][0]) / crange * 255)
        color = max(0, min(255, color))
        depth_grey[x, y] = (color, color, color)
# --- Export ----------------------------------------------------------------
# Write the grey-scale depth preview and the dense point cloud (PLY, no
# colors) into a Depth/ folder under the current working directory.
output_dir = 'Depth'
path_out = os.path.join(os.getcwd(), output_dir)
# Fix: create the output folder if it does not exist — both writes below
# previously failed with a missing-directory error otherwise.
os.makedirs(path_out, exist_ok=True)

depth_grey.save(path_out + "/testPython.png")
chunk.exportPoints(path = path_out + "/model.ply", format = Metashape.PointsFormatPLY, source_data = Metashape.DenseCloudData, save_colors = False)