7
« on: April 27, 2021, 10:24:20 AM »
Hi Paulo,
Thanks a lot for this very swift reply. I have followed your instructions (including not forcing the camera calibration, but just defining the focal length, width and height), but I still get a 'Zero resolution' error when reaching the point cloud calculation, which makes me think there might be an error somewhere else.
This is the script that I am currently using:
import os
import Metashape
import csv
import math
import copy
import time
import statistics
# following https://github.com/agisoft-llc/metashape-scripts/blob/master/src/quick_layout.py
# Checking compatibility: abort early if the running Metashape build is not
# the 1.7 API this script was written against.
compatible_major_version = "1.7"
# Keep only "major.minor" of the full version string (e.g. "1.7.2" -> "1.7").
found_major_version = ".".join(Metashape.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
    raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
def time_measure(func):
    """Decorator that prints the wall-clock runtime of each call to *func*.

    Returns the wrapped function's result unchanged.
    """
    import functools  # local import: keeps the block self-contained

    # functools.wraps preserves func's __name__/__doc__ on the wrapper;
    # without it every decorated function reports as "wrapper".
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        t1 = time.time()
        res = func(*args, **kwargs)
        t2 = time.time()
        print("Finished processing in {} sec.".format(t2 - t1))
        return res
    return wrapper
def show_message(msg):
    """Echo *msg* to stdout and display it in a modal Qt message box."""
    print(msg)
    box = QMessageBox()
    box.setText(msg)
    box.exec()
def check_chunk(chunk):
    """Return True when *chunk* is ready for processing.

    Reports (via show_message) and returns False when the chunk is
    missing/empty or has no coordinate system set.
    """
    problem = None
    if chunk is None or len(chunk.cameras) == 0:
        problem = "Empty chunk!"
    elif chunk.crs is None:
        problem = "Initialize chunk coordinate system first"
    if problem is not None:
        show_message(problem)
        return False
    return True
def get_antenna_transform(sensor):
    """Build the 4x4 antenna-offset transform for *sensor*.

    Uses the adjusted antenna location/rotation when available, falling
    back to the reference values otherwise.
    """
    antenna = sensor.antenna
    location = antenna.location
    if location is None:
        location = antenna.location_ref
    rotation = antenna.rotation
    if rotation is None:
        rotation = antenna.rotation_ref
    flip = Metashape.Matrix.Diag((1, -1, -1, 1))
    shift = Metashape.Matrix.Translation(location)
    spin = Metashape.Matrix.Rotation(Metashape.Utils.ypr2mat(rotation))
    return flip * shift * spin
def init_chunk_transform(chunk):
    """Seed chunk.transform with identity scale/rotation anchored at the
    first georeferenced camera.

    No-op when the chunk already has a transform scale.
    """
    if chunk.transform.scale is not None:
        return
    # Default origin when no camera carries a reference location.
    origin = Metashape.Vector([0, 0, 0])
    referenced = (c for c in chunk.cameras if c.reference.location is not None)
    anchor = next(referenced, None)
    if anchor is not None:
        origin = chunk.crs.unproject(anchor.reference.location)
    chunk.transform.scale = 1
    chunk.transform.rotation = Metashape.Matrix.Diag((1, 1, 1))
    chunk.transform.translation = origin
def estimate_rotation_matrices(chunk):
    """Fill in missing reference rotations (yaw/pitch/roll) for all cameras.

    For each camera group (plus the ungrouped cameras), a camera with no
    reference rotation gets a yaw estimated from the heading toward the
    next camera in the group; pitch and roll are set to 0. The last camera
    of a group inherits the previous camera's rotation. Mutates the
    cameras' reference data in place.
    """
    groups = copy.copy(chunk.camera_groups)
    groups.append(None)  # the None bucket collects cameras not in any group
    for group in groups:
        group_cameras = list(filter(lambda c: c.group == group, chunk.cameras))
        if len(group_cameras) == 0:
            continue
        if len(group_cameras) == 1:
            # Single camera: no neighbour to derive a heading from — use zeros.
            if group_cameras[0].reference.rotation is None:
                group_cameras[0].reference.rotation = Metashape.Vector([0, 0, 0])
            continue
        for idx, c in enumerate(group_cameras[0:-1]):
            next_camera = group_cameras[idx + 1]
            if c.reference.rotation is None:
                # Both endpoints need a GPS location to compute a direction.
                if c.reference.location is None or next_camera.reference.location is None:
                    continue
                prev_location = chunk.crs.unproject(c.reference.location)
                next_location = chunk.crs.unproject(next_camera.reference.location)
                # Direction to the next camera expressed in the local frame.
                direction = chunk.crs.localframe(prev_location).mulv(next_location - prev_location)
                # Bearing from atan2; +90 converts to the yaw convention,
                # then wrap into [0, 360).
                yaw = math.degrees(math.atan2(direction.y, direction.x)) + 90
                if yaw < 0:
                    yaw = yaw + 360
                c.reference.rotation = Metashape.Vector([yaw, 0, 0])
        # Last camera has no successor: reuse its predecessor's rotation.
        if group_cameras[-1].reference.rotation is None and group_cameras[-1].reference.location is not None:
            group_cameras[-1].reference.rotation = group_cameras[-2].reference.rotation
@time_measure
def align_cameras(chunk):
    """Assign an initial transform to every unaligned camera from its
    reference location and rotation, compensating for the antenna offset.

    Cameras that already have a transform, or lack a reference location or
    rotation, are skipped. Mutates camera transforms in place.
    """
    init_chunk_transform(chunk)
    estimate_rotation_matrices(chunk)
    for c in chunk.cameras:
        if c.transform is not None:
            continue  # already aligned
        location = c.reference.location
        if location is None:
            continue
        rotation = c.reference.rotation
        if rotation is None:
            continue
        location = chunk.crs.unproject(location) # location in ECEF
        rotation = chunk.crs.localframe(location).rotation().t() * Metashape.Utils.euler2mat(rotation, chunk.euler_angles) # rotation matrix in ECEF
        transform = Metashape.Matrix.Translation(location) * Metashape.Matrix.Rotation(rotation)
        # Move from ECEF into chunk coordinates and undo the antenna offset.
        transform = chunk.transform.matrix.inv() * transform * get_antenna_transform(c.sensor).inv()
        # Rebuild from translation + rotation only (drops any scale/skew).
        c.transform = Metashape.Matrix.Translation(transform.translation()) * Metashape.Matrix.Rotation(transform.rotation())
def run_camera_alignment():
    """Entry point: align all cameras of the active chunk from reference data.

    Validates the chunk first; alignment errors are reported but do not
    abort the enclosing script.
    """
    print("Alignment started...")  # fixed typo: "Alignement"
    doc = Metashape.app.document
    chunk = doc.chunk
    if not check_chunk(chunk):
        return
    try:
        align_cameras(chunk)
    except Exception as e:
        # Best-effort: report the failure instead of crashing the script.
        print(e)
    else:
        # Only claim success when alignment actually completed
        # (the original printed "finished!" even after an exception).
        print("Alignment finished!")
# --- Project setup: open document, load photos and reference data ---
global doc  # NOTE(review): 'global' at module level has no effect — only meaningful inside a function
doc = Metashape.app.document
path = "/path/2/file"  # TODO: placeholder — set the real project save path
photo_list = list(...)  # TODO: placeholder — replace with the actual list of image file paths (this raises TypeError as written)
doc.save(path)
# Add chunk
chunk = doc.chunk  # NOTE(review): assumes the document already has an active chunk — confirm
# load images to chunk
chunk.addPhotos(photo_list)
# Load camera position & view angles
# NOTE(review): 'resultsdir' is never defined anywhere in this script — this
# line raises NameError unless it is set elsewhere (e.g. in the console session).
chunk.importReference(path=''.join([resultsdir,'IMG_ref.csv']),
                      format=Metashape.ReferenceFormatCSV,
                      columns='nxyzXYZabcABC',delimiter=",")
#define coordinate system
chunk.crs = Metashape.CoordinateSystem("EPSG::32646")  # WGS 84 / UTM zone 46N
doc.save(path)
# Import calibration parameters of cameras.
# Give every camera its own hand-defined sensor: 18 mm lens on a
# 22.3 x 14.9 mm sensor at 6000 x 4000 px.
for camera in chunk.cameras:
    new_sensor = chunk.addSensor()
    new_sensor.type = Metashape.Sensor.Type.Frame
    new_sensor.width = 6000   # image width, pixels
    new_sensor.height = 4000  # image height, pixels
    new_sensor.focal_length = 18  # in mm
    new_sensor.pixel_width = 22.3 / new_sensor.width    # mm per pixel, horizontal
    new_sensor.pixel_height = 14.9 / new_sensor.height  # mm per pixel, vertical
    # BUG FIX: pixel_size is (x, y) = (width, height). The original built it
    # as [pixel_height, pixel_width] — swapped axes corrupt the calibration
    # and can produce zero/invalid resolution downstream.
    new_sensor.pixel_size = Metashape.Vector([new_sensor.pixel_width, new_sensor.pixel_height])
    camera.sensor = new_sensor
doc.save(path)
# Match photos
accuracy = 0 # equivalent to highest accuracy (downscale=0 means no image downscaling)
keypoints = 200000 #align photos key point limit
tiepoints = 20000 #align photos tie point limit
chunk.matchPhotos(downscale=accuracy, generic_preselection = True,reference_preselection=True,\
                  filter_mask = False, keypoint_limit = keypoints, tiepoint_limit = tiepoints)
doc.save(path)
# Align cameras using uploaded camera position and view angles (following https://github.com/agisoft-llc/metashape-scripts/blob/master/src/quick_layout.py)
run_camera_alignment()
# Define: Bounding box around camera locations (based on https://www.agisoft.com/forum/index.php?topic=10102.0)
# Enlargement of the region relative to the camera footprint, in percent
# (10000% = 101x the fitted extent).
BUFFER = 10000 #percent
def cross(a, b):
    """Return the unit-length cross product of 3D vectors *a* and *b*."""
    cx = a.y * b.z - a.z * b.y
    cy = a.z * b.x - a.x * b.z
    cz = a.x * b.y - a.y * b.x
    return Metashape.Vector([cx, cy, cz]).normalized()
# --- Fit the reconstruction region (bounding box) around the aligned cameras ---
new_region = Metashape.Region()
xcoord = Metashape.Vector([10E10, -10E10])  # running [min, max] easting; sentinels
ycoord = Metashape.Vector([10E10, -10E10])  # running [min, max] northing; sentinels
avg = [[],[]]  # per-camera x and y coordinates, for the median center
T = chunk.transform.matrix
s = chunk.transform.matrix.scale()
crs = chunk.crs
z = Metashape.Vector([0,0])  # [sum of camera altitudes, camera count]
for camera in chunk.cameras:
    if camera.transform:  # only cameras that were aligned
        # Camera center in projected (CRS) coordinates.
        coord = crs.project(T.mulp(camera.center))
        xcoord[0] = min(coord.x, xcoord[0])
        xcoord[1] = max(coord.x, xcoord[1])
        ycoord[0] = min(coord.y, ycoord[0])
        ycoord[1] = max(coord.y, ycoord[1])
        z[0] += coord.z
        z[1] += 1
        avg[0].append(coord.x)
        avg[1].append(coord.y)
# NOTE: 'z' is rebound from a 2-vector accumulator to the mean altitude here.
z = z[0] / z[1]
avg = Metashape.Vector([statistics.median(avg[0]), statistics.median(avg[1]), z])
# Four corners of the camera-extent rectangle at mean flight altitude.
corners = [Metashape.Vector([xcoord[0], ycoord[0], z]),
           Metashape.Vector([xcoord[0], ycoord[1], z]),
           Metashape.Vector([xcoord[1], ycoord[1], z]),
           Metashape.Vector([xcoord[1], ycoord[0], z])]
# Back into internal chunk coordinates.
corners = [T.inv().mulp(crs.unproject(x)) for x in list(corners)]
side1 = corners[0] - corners[1]
side2 = corners[0] - corners[-1]
# Side lengths measured in world units (hence the later division by scale s).
side1g = T.mulp(corners[0]) - T.mulp(corners[1])
side2g = T.mulp(corners[0]) - T.mulp(corners[-1])
side3g = T.mulp(corners[0]) - T.mulp(Metashape.Vector([corners[0].x, corners[0].y, 0]))
# Region size: extent scaled up by BUFFER percent; vertical = 3x altitude span.
new_size = ((100 + BUFFER) / 100) * Metashape.Vector([side2g.norm()/s, side1g.norm()/s, 3*side3g.norm() / s]) ##
# Region center: midpoint of the extent, shifted down along z.
xcoord, ycoord, z = T.inv().mulp(crs.unproject(Metashape.Vector([sum(xcoord)/2., sum(ycoord)/2., z - 2 * side3g.z]))) #
new_center = Metashape.Vector([xcoord, ycoord, z]) #by 4 corners
# Build an orthonormal frame from the rectangle's sides for the region rotation.
horizontal = side2
vertical = side1
normal = cross(vertical, horizontal)
horizontal = -cross(vertical, normal)  # re-orthogonalize against the normal
vertical = vertical.normalized()
R = Metashape.Matrix ([horizontal, vertical, -normal])
new_region.rot = R.t()
new_region.center = new_center
new_region.size = new_size
chunk.region = new_region
# Process sparse cloud: re-triangulate tie points after the manual alignment.
chunk.triangulatePoints()
#building dense cloud
# downscale=4 = medium quality depth maps; MildFiltering keeps small details.
chunk.buildDepthMaps(downscale=4, filter_mode=Metashape.MildFiltering)
chunk.buildDenseCloud()
print("Script finished")
Any ideas welcome! Thanks!