evaluate_lr.py

  1. """
  2. LR (Low-Resolution) evaluation.
  3. Note, the script only does evaluation. You will need to first inference yourself and save the results to disk
  4. Expected directory format for both prediction and ground-truth is:
  5. videomatte_512x288
  6. ├── videomatte_motion
  7. ├── pha
  8. ├── 0000
  9. ├── 0000.png
  10. ├── fgr
  11. ├── 0000
  12. ├── 0000.png
  13. ├── videomatte_static
  14. ├── pha
  15. ├── 0000
  16. ├── 0000.png
  17. ├── fgr
  18. ├── 0000
  19. ├── 0000.png
  20. Prediction must have the exact file structure and file name as the ground-truth,
  21. meaning that if the ground-truth is png/jpg, prediction should be png/jpg.
  22. Example usage:
  23. python evaluate.py \
  24. --pred-dir PATH_TO_PREDICTIONS/videomatte_512x288 \
  25. --true-dir PATH_TO_GROUNDTURTH/videomatte_512x288
  26. An excel sheet with evaluation results will be written to "PATH_TO_PREDICTIONS/videomatte_512x288/videomatte_512x288.xlsx"
  27. """
import argparse
import os
from concurrent.futures import ThreadPoolExecutor

import cv2
import numpy as np
import xlsxwriter
from tqdm import tqdm


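# Parses the command-line arguments, evaluates every dataset/clip found under
# --pred-dir against --true-dir (one worker thread per clip), and writes the
# results to an Excel workbook inside the prediction directory.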
class Evaluator:
    def __init__(self):
        self.parse_args()
        self.init_metrics()
        self.evaluate()
        self.write_excel()

    def parse_args(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--pred-dir', type=str, required=True)
        parser.add_argument('--true-dir', type=str, required=True)
        parser.add_argument('--num-workers', type=int, default=48)
        parser.add_argument('--metrics', type=str, nargs='+', default=[
            'pha_mad', 'pha_mse', 'pha_grad', 'pha_conn', 'pha_dtssd', 'fgr_mad', 'fgr_mse'])
        self.args = parser.parse_args()

    def init_metrics(self):
        self.mad = MetricMAD()
        self.mse = MetricMSE()
        self.grad = MetricGRAD()
        self.conn = MetricCONN()
        self.dtssd = MetricDTSSD()

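    # Submit one worker per (dataset, clip) directory; `position` gives each
    # clip its own tqdm progress-bar row.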
    def evaluate(self):
        tasks = []
        position = 0
        with ThreadPoolExecutor(max_workers=self.args.num_workers) as executor:
            for dataset in sorted(os.listdir(self.args.pred_dir)):
                if os.path.isdir(os.path.join(self.args.pred_dir, dataset)):
                    for clip in sorted(os.listdir(os.path.join(self.args.pred_dir, dataset))):
                        future = executor.submit(self.evaluate_worker, dataset, clip, position)
                        tasks.append((dataset, clip, future))
                        position += 1
        self.results = [(dataset, clip, future.result()) for dataset, clip, future in tasks]

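    # Write a 'summary' sheet plus one sheet per metric. Each metric sheet gets one
    # row per clip holding its per-frame values; the per-frame and overall averages
    # are left to Excel AVERAGE formulas, and the summary sheet references each
    # metric sheet's overall average.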
    def write_excel(self):
        workbook = xlsxwriter.Workbook(os.path.join(self.args.pred_dir, f'{os.path.basename(self.args.pred_dir)}.xlsx'))
        summarysheet = workbook.add_worksheet('summary')
        metricsheets = [workbook.add_worksheet(metric) for metric in self.results[0][2].keys()]

        for i, metric in enumerate(self.results[0][2].keys()):
            summarysheet.write(i, 0, metric)
            summarysheet.write(i, 1, f'={metric}!B2')

        for row, (dataset, clip, metrics) in enumerate(self.results):
            for metricsheet, metric in zip(metricsheets, metrics.values()):
                # Write the header
                if row == 0:
                    metricsheet.write(1, 0, 'Average')
                    metricsheet.write(1, 1, '=AVERAGE(C2:ZZ2)')
                    for col in range(len(metric)):
                        metricsheet.write(0, col + 2, col)
                        colname = xlsxwriter.utility.xl_col_to_name(col + 2)
                        metricsheet.write(1, col + 2, f'=AVERAGE({colname}3:{colname}9999)')

                metricsheet.write(row + 2, 0, dataset)
                metricsheet.write(row + 2, 1, clip)
                metricsheet.write_row(row + 2, 2, metric)

        workbook.close()

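    # Per-clip worker: loads each frame's alpha (and foreground, if requested),
    # then accumulates the selected per-frame metrics. dtSSD compares against the
    # previous frame, and the foreground metrics are evaluated only where the
    # ground-truth alpha is greater than zero.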
    def evaluate_worker(self, dataset, clip, position):
        framenames = sorted(os.listdir(os.path.join(self.args.pred_dir, dataset, clip, 'pha')))
        metrics = {metric_name: [] for metric_name in self.args.metrics}

        pred_pha_tm1 = None
        true_pha_tm1 = None

        for i, framename in enumerate(tqdm(framenames, desc=f'{dataset} {clip}', position=position, dynamic_ncols=True)):
            true_pha = cv2.imread(os.path.join(self.args.true_dir, dataset, clip, 'pha', framename), cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255
            pred_pha = cv2.imread(os.path.join(self.args.pred_dir, dataset, clip, 'pha', framename), cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255
            if 'pha_mad' in self.args.metrics:
                metrics['pha_mad'].append(self.mad(pred_pha, true_pha))
            if 'pha_mse' in self.args.metrics:
                metrics['pha_mse'].append(self.mse(pred_pha, true_pha))
            if 'pha_grad' in self.args.metrics:
                metrics['pha_grad'].append(self.grad(pred_pha, true_pha))
            if 'pha_conn' in self.args.metrics:
                metrics['pha_conn'].append(self.conn(pred_pha, true_pha))
            if 'pha_dtssd' in self.args.metrics:
                if i == 0:
                    metrics['pha_dtssd'].append(0)
                else:
                    metrics['pha_dtssd'].append(self.dtssd(pred_pha, pred_pha_tm1, true_pha, true_pha_tm1))

            pred_pha_tm1 = pred_pha
            true_pha_tm1 = true_pha

            if 'fgr_mse' in self.args.metrics or 'fgr_mad' in self.args.metrics:
                true_fgr = cv2.imread(os.path.join(self.args.true_dir, dataset, clip, 'fgr', framename), cv2.IMREAD_COLOR).astype(np.float32) / 255
                pred_fgr = cv2.imread(os.path.join(self.args.pred_dir, dataset, clip, 'fgr', framename), cv2.IMREAD_COLOR).astype(np.float32) / 255

                true_msk = true_pha > 0
                if 'fgr_mse' in self.args.metrics:
                    metrics['fgr_mse'].append(self.mse(pred_fgr[true_msk], true_fgr[true_msk]))
                if 'fgr_mad' in self.args.metrics:
                    metrics['fgr_mad'].append(self.mad(pred_fgr[true_msk], true_fgr[true_msk]))

        return metrics


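# MAD: mean absolute difference between prediction and ground truth, reported x 1e3.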
class MetricMAD:
    def __call__(self, pred, true):
        return np.abs(pred - true).mean() * 1e3


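# MSE: mean squared error between prediction and ground truth, reported x 1e3.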
class MetricMSE:
    def __call__(self, pred, true):
        return ((pred - true) ** 2).mean() * 1e3


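# Grad: gradient error. Both alpha maps are min-max normalized, their gradient
# magnitudes are computed with first-derivative-of-Gaussian filters, and the sum
# of squared differences is reported divided by 1000.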
class MetricGRAD:
    def __init__(self, sigma=1.4):
        self.filter_x, self.filter_y = self.gauss_filter(sigma)

    def __call__(self, pred, true):
        pred_normed = np.zeros_like(pred)
        true_normed = np.zeros_like(true)
        cv2.normalize(pred, pred_normed, 1., 0., cv2.NORM_MINMAX)
        cv2.normalize(true, true_normed, 1., 0., cv2.NORM_MINMAX)

        true_grad = self.gauss_gradient(true_normed).astype(np.float32)
        pred_grad = self.gauss_gradient(pred_normed).astype(np.float32)

        grad_loss = ((true_grad - pred_grad) ** 2).sum()
        return grad_loss / 1000

    def gauss_gradient(self, img):
        img_filtered_x = cv2.filter2D(img, -1, self.filter_x, borderType=cv2.BORDER_REPLICATE)
        img_filtered_y = cv2.filter2D(img, -1, self.filter_y, borderType=cv2.BORDER_REPLICATE)
        return np.sqrt(img_filtered_x**2 + img_filtered_y**2)

    @staticmethod
    def gauss_filter(sigma, epsilon=1e-2):
        half_size = np.ceil(sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon)))
        size = int(2 * half_size + 1)  # np.int was removed in NumPy 1.24; use the builtin

        # create filter in x axis
        filter_x = np.zeros((size, size))
        for i in range(size):
            for j in range(size):
                filter_x[i, j] = MetricGRAD.gaussian(i - half_size, sigma) * MetricGRAD.dgaussian(
                    j - half_size, sigma)

        # normalize filter
        norm = np.sqrt((filter_x**2).sum())
        filter_x = filter_x / norm
        filter_y = np.transpose(filter_x)

        return filter_x, filter_y

    @staticmethod
    def gaussian(x, sigma):
        return np.exp(-x**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))

    @staticmethod
    def dgaussian(x, sigma):
        return -x * MetricGRAD.gaussian(x, sigma) / sigma**2


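# Conn: connectivity error. Thresholds both alpha maps in steps of 0.1, tracks the
# last level at which each pixel still belongs to the largest region connected in
# both prediction and ground truth, and compares how far each alpha value exceeds
# that level (differences below 0.15 are ignored). Reported divided by 1000.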
class MetricCONN:
    def __call__(self, pred, true):
        step = 0.1
        thresh_steps = np.arange(0, 1 + step, step)
        round_down_map = -np.ones_like(true)
        for i in range(1, len(thresh_steps)):
            true_thresh = true >= thresh_steps[i]
            pred_thresh = pred >= thresh_steps[i]
            intersection = (true_thresh & pred_thresh).astype(np.uint8)

            # connected components
            _, output, stats, _ = cv2.connectedComponentsWithStats(
                intersection, connectivity=4)
            # start from 1 in dim 0 to exclude background
            size = stats[1:, -1]

            # largest connected component of the intersection
            omega = np.zeros_like(true)
            if len(size) != 0:
                max_id = np.argmax(size)
                # plus one to include background
                omega[output == max_id + 1] = 1

            mask = (round_down_map == -1) & (omega == 0)
            round_down_map[mask] = thresh_steps[i - 1]
        round_down_map[round_down_map == -1] = 1

        true_diff = true - round_down_map
        pred_diff = pred - round_down_map
        # only calculate difference larger than or equal to 0.15
        true_phi = 1 - true_diff * (true_diff >= 0.15)
        pred_phi = 1 - pred_diff * (pred_diff >= 0.15)
        connectivity_error = np.sum(np.abs(true_phi - pred_phi))
        return connectivity_error / 1000


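# dtSSD: temporal coherence. Root-mean-square difference between the predicted and
# ground-truth frame-to-frame alpha changes, reported x 1e2.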
class MetricDTSSD:
    def __call__(self, pred_t, pred_tm1, true_t, true_tm1):
        dtSSD = ((pred_t - pred_tm1) - (true_t - true_tm1)) ** 2
        dtSSD = np.sum(dtSSD) / true_t.size
        dtSSD = np.sqrt(dtSSD)
        return dtSSD * 1e2


if __name__ == '__main__':
    Evaluator()