Dear all,
I have written a script (below) to extract world 3D coordinates from the image pixels of every camera in an already aligned chunk.
The script works, but it is extremely slow, even though I only sample the pixels on a 50-pixel grid (about 2 hours per image). Do you have any idea how to speed it up?
Best regards
FabVin
import Metashape, math, os, csv

# Convert pixel coordinates (imX, imY) of a camera into world coordinates in the chunk CRS
def pixel_to_point3D(chunk, camera, imX, imY):
    point2D = Metashape.Vector([imX, imY])
    sensor = camera.sensor
    # cast a ray from the camera centre through the pixel and intersect it with the model surface
    v = chunk.model.pickPoint(camera.center, camera.transform.mulp(sensor.calibration.unproject(point2D)))
    if v is None:
        return None
    v_t = chunk.transform.matrix.mulp(v)  # internal -> geocentric coordinates
    v_t.size = 3
    v_out_world = chunk.crs.project(v_t)  # geocentric -> projected chunk CRS
    return v_out_world
doc = Metashape.app.document
shift = Metashape.Vector([727290, 6265210, 70])  # local offset subtracted from the output coordinates

for chunk in doc.chunks:
    for camera in chunk.cameras:
        if camera.transform is None:
            continue  # skip cameras that were not aligned
        # 'path' (output root folder) is defined earlier in the full script
        with open(path + 'OUT/COORDS/' + chunk.label + '/' + camera.label.split(".")[0] + '.csv', 'wt') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_NONE, lineterminator='\n')
            # sample the image on a 50-pixel grid
            for y in range(0, 6016, 50):
                for x in range(0, 4000, 50):
                    result = pixel_to_point3D(chunk, camera, x, y)
                    if result is not None:
                        result = result - shift
                        spamwriter.writerow([x, y, round(result[0], 3), round(result[1], 3), round(result[2], 3)])
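
For reference, a single pixel can be tested from the Metashape Python console before launching the full grid. This is just a sketch; the chunk/camera indices and the pixel coordinates are placeholders:

# quick single-pixel sanity check (indices and pixel coordinates are placeholders)
doc = Metashape.app.document
chunk = doc.chunks[0]
camera = chunk.cameras[0]
print(pixel_to_point3D(chunk, camera, 2000, 3000))  # world coordinates, or None if the ray misses the surface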