diff --git a/code/python/src/utility/blending.py b/code/python/src/utility/blending.py
index fcbd2085fe50566381b1f20546725b6bb124e785..9e18e938182356b7746fa5cc0f10beb23100ee05 100644
--- a/code/python/src/utility/blending.py
+++ b/code/python/src/utility/blending.py
@@ -260,9 +260,9 @@ class BlendIt:
                                                                [tangent_sq_yv, tangent_sq_xv],
                                                                order=1, mode='constant', cval=0.)
 
-            erp_radial_weights[erp_sq_yv.astype(np.int), erp_sq_xv.astype(np.int), triangle_index] = erp_face_radial_weights
+            erp_radial_weights[erp_sq_yv.astype(int), erp_sq_xv.astype(int), triangle_index] = erp_face_radial_weights
 
-            erp_frustum_weights[erp_sq_yv.astype(np.int), erp_sq_xv.astype(np.int),
+            erp_frustum_weights[erp_sq_yv.astype(int), erp_sq_xv.astype(int),
                                 triangle_index] = erp_face_frustum_weights
 
         self.frustum_blendweights = erp_frustum_weights
@@ -294,11 +294,11 @@ class BlendIt:
             erp_face_image = ndimage.map_coordinates(tangent_images[triangle_index], [tangent_sq_yv, tangent_sq_xv],
                                                      order=1, mode='constant', cval=0.)
 
-            nn_blending[erp_tri_yv.astype(np.int), erp_tri_xv.astype(np.int)] = \
+            nn_blending[erp_tri_yv.astype(int), erp_tri_xv.astype(int)] = \
                 ndimage.map_coordinates(tangent_images[triangle_index], [tangent_tri_yv, tangent_tri_xv],
                                         order=1, mode='constant', cval=0.)
 
-            erp_depth_tensor[erp_sq_yv.astype(np.int), erp_sq_xv.astype(np.int),
+            erp_depth_tensor[erp_sq_yv.astype(int), erp_sq_xv.astype(int),
                              triangle_index] = erp_face_image.astype(np.float64)
 
         return erp_depth_tensor, nn_blending
@@ -322,7 +322,7 @@ class BlendIt:
 
     def get_frustum_blendweights(self, size):
         height, width = size
-        weight_matrix = np.zeros((height, width), dtype=np.float)
+        weight_matrix = np.zeros((height, width), dtype=float)
 
         x_list = np.linspace(0, width, width, endpoint=False)
         y_list = np.linspace(0, height, height, endpoint=False)
@@ -343,8 +343,8 @@ class BlendIt:
         peak_bottom_right = np.array([np.max(peak_coors[0]), np.max(peak_coors[1])])
 
         unit_dir = np.array([1/np.sqrt(2), 1/np.sqrt(2)])
-        top_left = (peak_top_left - 2*self.diagonal_percentage*unit_dir).astype(np.int)
-        bottom_right = (peak_bottom_right + 2*self.diagonal_percentage*unit_dir).astype(np.int)
+        top_left = (peak_top_left - 2*self.diagonal_percentage*unit_dir).astype(int)
+        bottom_right = (peak_bottom_right + 2*self.diagonal_percentage*unit_dir).astype(int)
         total_dist[top_left[0]:bottom_right[0]+1, top_left[1]:bottom_right[1]+1] = 0
         total_dist = (total_dist - np.min(total_dist)) / np.ptp(total_dist)
         total_dist[top_left[0]:bottom_right[0] + 1, top_left[1]:bottom_right[1] + 1] = 1
diff --git a/code/python/src/utility/cam_models.py b/code/python/src/utility/cam_models.py
index 90cfe818acf67f61eb2a608b0522f75280d24e7e..fea7bd5a0c2d8e4243a74298461bf688a1cbaa29 100644
--- a/code/python/src/utility/cam_models.py
+++ b/code/python/src/utility/cam_models.py
@@ -316,11 +316,11 @@ def stitch_rgb_image(image_data_list, image_param_list, fisheye_model, subimage_
     fisheye_3d_points = cam2world(fisheye_2d_points, fisheye_model) 
     # point3d2obj(fisheye_3d_points, "D:/1.obj")
 
-    fisheye_image = np.zeros((fisheye_image_height, fisheye_image_width, channel_number), np.float)
-    fisheye_image_weight = np.zeros((fisheye_image_height, fisheye_image_width), np.float)
+    fisheye_image = np.zeros((fisheye_image_height, fisheye_image_width, channel_number), float)
+    fisheye_image_weight = np.zeros((fisheye_image_height, fisheye_image_width), float)
     for index in range(0, len(image_data_list)):
-        fisheye_image_weight_subimg = np.zeros((fisheye_image_height, fisheye_image_width), np.float)
-        fisheye_image_subimage = np.zeros((fisheye_image_height, fisheye_image_width, channel_number), np.float)
+        fisheye_image_weight_subimg = np.zeros((fisheye_image_height, fisheye_image_width), float)
+        fisheye_image_subimage = np.zeros((fisheye_image_height, fisheye_image_width, channel_number), float)
 
         image_param = image_param_list[index]
         image_data = image_data_list[index]
@@ -342,7 +342,7 @@ def stitch_rgb_image(image_data_list, image_param_list, fisheye_model, subimage_
         available_pixels_list_fov = np.logical_and(points_azimuth_inhfov, points_altitude_invfov)
         available_pixels_list_fov_mat = available_pixels_list_fov.reshape(fisheye_image_height, fisheye_image_width)
 
-        fisheye_2d_points_subimage = fisheye_2d_points[available_pixels_list_fov].astype(np.int)
+        fisheye_2d_points_subimage = fisheye_2d_points[available_pixels_list_fov].astype(int)
 
         # projection to pin-hole image
         pinhole_3d_points = (np.divide(pinhole_3d_points, pinhole_3d_points[2, :]))
@@ -362,7 +362,7 @@ def stitch_rgb_image(image_data_list, image_param_list, fisheye_model, subimage_
                                         order=1, mode='constant', cval=255.0)
 
         # compute blend weight
-        available_pixels_weight = np.ones(available_pixels_list.shape,  np.float)
+        available_pixels_weight = np.ones(available_pixels_list.shape, float)
         available_pixels_weight[~available_pixels_list] = 0
         fisheye_image_weight_subimg[available_pixels_list_fov_mat] = available_pixels_weight
 
@@ -429,7 +429,7 @@ def sample_rgb_image(img, model, fov=[60, 60], canvas_size=[400, 400], sample_gr
         # NOTE world2cam use fast_poly, world2cam_slow use poly
         # fetch_from = world2cam(world_cs.T, model)
         fetch_from = world2cam_slow(world_cs, model).T
-        tangential_img = np.zeros(tuple(canvas_size) + (channel_number,), dtype=np.float)
+        tangential_img = np.zeros(tuple(canvas_size) + (channel_number,), dtype=float)
 
         for channel in range(0, channel_number):
             tangential_img[:, :, channel] = ndimage.map_coordinates(img[:, :, channel], [fetch_from[:, 1].reshape(canvas_size), fetch_from[:, 0].reshape(canvas_size)], order=1, mode='constant')
@@ -501,12 +501,12 @@ def sample_img(img, cam_model, fov=53, run_midas=False):
 
     equirect_size = (3, 1000, 2000)     # Size for equirectangular image
     equirect_3D_points, _ = equirect_cam2world(equirect_size[1:])
-    equirect_3D_points_rgb = np.zeros((7, equirect_3D_points.shape[-1]), dtype=np.float)
+    equirect_3D_points_rgb = np.zeros((7, equirect_3D_points.shape[-1]), dtype=float)
     equirect_3D_points_rgb[0, :] = equirect_3D_points[0, :]
     equirect_3D_points_rgb[1, :] = equirect_3D_points[1, :]
     equirect_3D_points_rgb[2, :] = equirect_3D_points[2, :]
 
-    fisheye2equirec = np.zeros((3, equirect_3D_points.shape[-1]), dtype=np.float)
+    fisheye2equirec = np.zeros((3, equirect_3D_points.shape[-1]), dtype=float)
     #   Lines 200-205 is for converting the whole fisheye to equirectangular
     #   Points at the back of the cylinder are mapped to nan
     nan_boolean = np.bitwise_not(np.isnan(np.sum(equirect_3D_points.T, axis=1)))
@@ -538,7 +538,7 @@ def sample_img(img, cam_model, fov=53, run_midas=False):
             world_cs = np.linalg.inv(pinhole_camera['rotation']) @ pinhole_cs
 
             #   Fetch RGB from fisheye image to assemble perspective subview
-            fetch_from = world2cam(world_cs.T, cam_model).astype(np.int)
+            fetch_from = world2cam(world_cs.T, cam_model).astype(int)
             fetch_from[:, 0] = np.clip(fetch_from[:, 0], 0, width-1)
             fetch_from[:, 1] = np.clip(fetch_from[:, 1], 0, height-1)
             virtual2fisheye_idxs = np.dstack((fetch_from[:, 0].reshape(canvas_size), fetch_from[:, 1].reshape(canvas_size)))
@@ -770,7 +770,7 @@ def generate_camera_orientation(hfov_fisheye, vfov_fisheye, hfov_pinhole, vfov_p
     overlap_area_v = v_index[0] + vfov_pinhole / 2.0 - (v_index[1] - vfov_pinhole / 2.0)
     log.debug("the vertical overlap angle is {}".format(overlap_area_v))
 
-    z_rotation = np.zeros(x_rotation.shape, np.float)
+    z_rotation = np.zeros(x_rotation.shape, float)
     xyz_rotation_array = np.stack((x_rotation, y_rotation, z_rotation), axis=0)
     xyz_rotation_array = xyz_rotation_array.reshape([3, horizontal_size * vertical_size])
     return xyz_rotation_array
diff --git a/code/python/src/utility/depth_stitch.py b/code/python/src/utility/depth_stitch.py
index a8b334d1ea31f159bba3d4db67b34946cc186e79..5bb75eb5330b2fca9a2dd4a8fe5e592998e7b4f4 100644
--- a/code/python/src/utility/depth_stitch.py
+++ b/code/python/src/utility/depth_stitch.py
@@ -57,8 +57,8 @@ def stitch_depth_subimage(depth_data_list, image_param_list, fisheye_model):
     fisheye_image_height = fisheye_model["intrinsics"]["image_size"][0]
     fisheye_image_width = fisheye_model["intrinsics"]["image_size"][1]
 
-    fisheye_depth = np.zeros((fisheye_image_height, fisheye_image_width), np.float)
-    fisheye_image_weight = np.zeros((fisheye_image_height, fisheye_image_width), np.float)
+    fisheye_depth = np.zeros((fisheye_image_height, fisheye_image_width), float)
+    fisheye_image_weight = np.zeros((fisheye_image_height, fisheye_image_width), float)
 
     # project the pinhole image to 3D
     x_list = np.linspace(0, fisheye_image_width, fisheye_image_width, endpoint=False)
@@ -131,7 +131,7 @@ def find_corresponding(src_image, src_param, tar_image, tar_param, fisheye_model
     src_image_u_list = np.linspace(0, src_image_width, src_image_width, endpoint=False)  # x
     src_image_v_list = np.linspace(0, src_image_height, src_image_height, endpoint=False)  # y
     src_image_grid_u, src_image_grid_v = np.meshgrid(src_image_u_list, src_image_v_list)
-    src_image_grid_z = np.ones(src_image_grid_u.shape, np.float)
+    src_image_grid_z = np.ones(src_image_grid_u.shape, float)
     src_image_2d_points = np.stack((src_image_grid_u.ravel(), src_image_grid_v.ravel(), src_image_grid_z.ravel()), axis=1)
 
     # project the pinhole image to world coords (spherical 3D points)
@@ -165,8 +165,8 @@ def find_corresponding(src_image, src_param, tar_image, tar_param, fisheye_model
     if src_image_2d_points_available.shape[0] == 0:
         log.debug("the do not have overlap between two images.")
     else:
-        src_image_avail_pixel_data = src_image[src_image_2d_points_available.astype(np.int)]
-        tar_image_avail_pixel_data = tar_image[tar_image_2d_points_available.astype(np.int)]
+        src_image_avail_pixel_data = src_image[src_image_2d_points_available.astype(int)]
+        tar_image_avail_pixel_data = tar_image[tar_image_2d_points_available.astype(int)]
         rms = np.sqrt(np.mean((src_image_avail_pixel_data - tar_image_avail_pixel_data) ** 2))
         log.debug("The corresponding pixel rms is {}".format(rms))
 
diff --git a/code/python/src/utility/depthmap_utils.py b/code/python/src/utility/depthmap_utils.py
index 1ec02d7c9113e9a3e4cf5eafec0382bc75682670..860494212aa7449013383fe4d801760b30f262af 100644
--- a/code/python/src/utility/depthmap_utils.py
+++ b/code/python/src/utility/depthmap_utils.py
@@ -757,7 +757,7 @@ def dispmap_normalize(dispmap, method = "", mask = None):
     :rtype: numpy
     """
     if mask is None:
-        mask = np.ones_like(dispmap, dtype= np.bool)
+        mask = np.ones_like(dispmap, dtype=bool)
 
     dispmap_norm = None
     if method == "naive":
diff --git a/code/python/src/utility/gnomonic_projection.py b/code/python/src/utility/gnomonic_projection.py
index 80524126f49b6c83fde489caa783335a8fcd0438..c4d07063fdc4d0e5a7a0ce6ccab53aba10b65d4b 100644
--- a/code/python/src/utility/gnomonic_projection.py
+++ b/code/python/src/utility/gnomonic_projection.py
@@ -96,7 +96,7 @@ def gnomonic_projection(theta, phi, theta_0, phi_0):
     # get cos_c's zero element index
     zeros_index = cos_c == 0
     if np.any(zeros_index):
-        cos_c[zeros_index] = np.finfo(np.float).eps
+        cos_c[zeros_index] = np.finfo(float).eps
 
     x = np.cos(phi) * np.sin(theta - theta_0) / cos_c
     y = (np.cos(phi_0) * np.sin(phi) - np.sin(phi_0) * np.cos(phi) * np.cos(theta - theta_0)) / cos_c
@@ -128,7 +128,7 @@ def reverse_gnomonic_projection(x, y, lambda_0, phi_1):
     # get rho's zero element index
     zeros_index = rho == 0
     if np.any(zeros_index):
-        rho[zeros_index] = np.finfo(np.float).eps
+        rho[zeros_index] = np.finfo(float).eps
 
     c = np.arctan2(rho, 1)
     phi_ = np.arcsin(np.cos(c) * np.sin(phi_1) + (y * np.sin(c) * np.cos(phi_1)) / rho)
@@ -187,11 +187,11 @@ def gnomonic2pixel(coord_gnom_x, coord_gnom_y,
     # TODO check add the padding whether necessary
     gnomonic2image_width_ratio = (tangent_image_width - 1.0) / (x_max - x_min + padding_size * 2.0)
     coord_pixel_x = (coord_gnom_x - x_min + padding_size) * gnomonic2image_width_ratio
-    coord_pixel_x = (coord_pixel_x + 0.5).astype(np.int)
+    coord_pixel_x = (coord_pixel_x + 0.5).astype(int)
 
     gnomonic2image_height_ratio = (tangent_image_height - 1.0) / (y_max - y_min + padding_size * 2.0)
     coord_pixel_y = -(coord_gnom_y - y_max - padding_size) * gnomonic2image_height_ratio
-    coord_pixel_y = (coord_pixel_y + 0.5).astype(np.int)
+    coord_pixel_y = (coord_pixel_y + 0.5).astype(int)
 
     return coord_pixel_x, coord_pixel_y
 
diff --git a/code/python/src/utility/image_io.py b/code/python/src/utility/image_io.py
index f1ba6655399ee3e8e627da07bd256b69c2b78e7d..c77903663c60d591ad037e78c6c8c9fbd86446b2 100644
--- a/code/python/src/utility/image_io.py
+++ b/code/python/src/utility/image_io.py
@@ -112,7 +112,7 @@ def image_save(image_data, image_file_path):
     """
     # 0) convert the datatype
     image = None
-    if image_data.dtype in [np.float, np.int64, np.int]:
+    if image_data.dtype in [float, np.int64, int]:
         print("saved image array type is {}, converting to uint8".format(image_data.dtype))
         image = image_data.astype(np.uint8)
     else:
diff --git a/code/python/src/utility/pointcloud_utils.py b/code/python/src/utility/pointcloud_utils.py
index d973230056d4f724280859f515571be43f0d17f4..7e6304c54d3d0c20ee322e77bff54d6b83622f72 100644
--- a/code/python/src/utility/pointcloud_utils.py
+++ b/code/python/src/utility/pointcloud_utils.py
@@ -107,7 +107,7 @@ def depthmap2pointclouds_perspective(depth_map, rgb_image, cam_int_param, output
     x_list = np.linspace(0, image_width, image_width, endpoint=False)
     y_list = np.linspace(0, image_height, image_height, endpoint=False)
     grid_x, grid_y = np.meshgrid(x_list, y_list)
-    gird_z = np.ones(grid_x.shape, np.float)
+    gird_z = np.ones(grid_x.shape, float)
     points_2d_pixel = np.stack((grid_x.ravel(), grid_y.ravel(), gird_z.ravel()), axis=1)
     points_2d_pixel = np.multiply(points_2d_pixel.T, depth_map.ravel())
     points_3d_pixel = np.linalg.inv(cam_int_param) @ points_2d_pixel
diff --git a/code/python/src/utility/polygon.py b/code/python/src/utility/polygon.py
index 0c57e2204c13d2ebbcb244d05b4e56a5a5ee9397..174e7ea3bd34b5edac8a7a43b4dfc41fea398b91 100644
--- a/code/python/src/utility/polygon.py
+++ b/code/python/src/utility/polygon.py
@@ -85,7 +85,7 @@ def enlarge_polygon(old_points, offset):
 
         # 1) Move the points by the offset.
         # the points of line parallel to ij
-        v1 = np.array([old_points[j][0] - old_points[i][0], old_points[j][1] - old_points[i][1]], np.float)
+        v1 = np.array([old_points[j][0] - old_points[i][0], old_points[j][1] - old_points[i][1]], float)
         norm = np.linalg.norm(v1)
         v1 = v1 / norm * offset
         n1 = [-v1[1], v1[0]]
@@ -93,7 +93,7 @@ def enlarge_polygon(old_points, offset):
         pij2 = [old_points[j][0] + n1[0], old_points[j][1] + n1[1]]
 
         # the points of line parallel to jk
-        v2 = np.array([old_points[k][0] - old_points[j][0], old_points[k][1] - old_points[j][1]], np.float)
+        v2 = np.array([old_points[k][0] - old_points[j][0], old_points[k][1] - old_points[j][1]], float)
         norm = np.linalg.norm(v2)
         v2 = v2 / norm * offset
         n2 = [-v2[1], v2[0]]
diff --git a/code/python/src/utility/projection_icosahedron.py b/code/python/src/utility/projection_icosahedron.py
index 3ce4d7d57eefda6fcd7cdef0e28c7d9d51dd8f4c..2357e86b8011fc0f134ca967bfaace67b513ad6f 100644
--- a/code/python/src/utility/projection_icosahedron.py
+++ b/code/python/src/utility/projection_icosahedron.py
@@ -280,7 +280,7 @@ def erp2ico_image(erp_image, tangent_image_width, padding_size=0.0, full_face_im
         gnom_range_xv, gnom_range_yv = np.meshgrid(gnom_range_x, gnom_range_y)
 
         # the tangent triangle points coordinate in tangent image
-        inside_list = np.full(gnom_range_xv.shape[:2], True, dtype=np.bool)
+        inside_list = np.full(gnom_range_xv.shape[:2], True, dtype=bool)
         if not full_face_image:
             gnom_range_xyv = np.stack((gnom_range_xv.flatten(), gnom_range_yv.flatten()), axis=1)
             pixel_eps = (gnomonic_x_max - gnomonic_x_min) / (tangent_image_width)
@@ -433,7 +433,7 @@ def ico2erp_image(tangent_images, erp_image_height, padding_size=0.0, blender_me
                                                        0.0, tangent_image_width, tangent_image_height, tangent_gnomonic_range)
 
             for channel in range(0, images_channels_number):
-                erp_image[triangle_yv[available_pixels_list].astype(np.int), triangle_xv[available_pixels_list].astype(np.int), channel] = \
+                erp_image[triangle_yv[available_pixels_list].astype(int), triangle_xv[available_pixels_list].astype(int), channel] = \
                     ndimage.map_coordinates(tangent_images_subimage[:, :, channel], [tangent_yv, tangent_xv], order=1, mode='constant', cval=255)
         elif blender_method == "mean":
             triangle_points_tangent = [[gnomonic_x_min, gnomonic_y_max],
@@ -447,14 +447,14 @@ def ico2erp_image(tangent_images, erp_image_height, padding_size=0.0, blender_me
                                                        0.0, tangent_image_width, tangent_image_height, tangent_gnomonic_range)
             for channel in range(0, images_channels_number):
                 erp_face_image = ndimage.map_coordinates(tangent_images_subimage[:, :, channel], [tangent_yv, tangent_xv], order=1, mode='constant', cval=255)
-                erp_image[triangle_yv[available_pixels_list].astype(np.int), triangle_xv[available_pixels_list].astype(np.int), channel] += erp_face_image.astype(np.float64)
+                erp_image[triangle_yv[available_pixels_list].astype(int), triangle_xv[available_pixels_list].astype(int), channel] += erp_face_image.astype(np.float64)
 
             face_weight_mat = np.ones(erp_face_image.shape, np.float64)
             erp_weight_mat[triangle_yv[available_pixels_list].astype(np.int64), triangle_xv[available_pixels_list].astype(np.int64)] += face_weight_mat
 
     # compute the final optical flow base on weight
     if blender_method == "mean":
-        # erp_flow_weight_mat = np.full(erp_flow_weight_mat.shape, erp_flow_weight_mat.max(), np.float) # debug
+        # erp_flow_weight_mat = np.full(erp_flow_weight_mat.shape, erp_flow_weight_mat.max(), float) # debug
         non_zero_weight_list = erp_weight_mat != 0
         if not np.all(non_zero_weight_list):
             log.warn("the optical flow weight matrix contain 0.")
diff --git a/code/python/src/utility/spherical_coordinates.py b/code/python/src/utility/spherical_coordinates.py
index 32991ead7284d56321002b02ee687ac3840685c8..68565bee809ea0e3ef6fb6e49c9b1b56603e5c09 100644
--- a/code/python/src/utility/spherical_coordinates.py
+++ b/code/python/src/utility/spherical_coordinates.py
@@ -148,10 +148,10 @@ def car2sph(points_car, min_radius=1e-10):
 
     valid_list = radius > min_radius  # set the 0 radius to origin.
 
-    theta = np.zeros((points_car.shape[0]), np.float)
+    theta = np.zeros((points_car.shape[0]), float)
     theta[valid_list] = np.arctan2(points_car[:, 0][valid_list], points_car[:, 2][valid_list])
 
-    phi = np.zeros((points_car.shape[0]), np.float)
+    phi = np.zeros((points_car.shape[0]), float)
     phi[valid_list] = -np.arcsin(np.divide(points_car[:, 1][valid_list], radius[valid_list]))
 
     return np.stack((theta, phi), axis=1)
@@ -170,7 +170,7 @@ def sph2car(theta, phi, radius=1.0):
     :return: +x right, +y down, +z is froward, shape is [3, point_number]
     :rtype: numpy
     """
-    # points_cartesian_3d = np.array.zeros((theta.shape[0],3),np.float)
+    # points_cartesian_3d = np.zeros((theta.shape[0], 3), float)
     x = radius * np.cos(phi) * np.sin(theta)
     z = radius * np.cos(phi) * np.cos(theta)
     y = -radius * np.sin(phi)
diff --git a/code/python/src/utility/subimage.py b/code/python/src/utility/subimage.py
index dc911163deee0cb1dc6773d4143d550d4857eebe..2ecd22125732158bb87e8dade1ef3edce45f897d 100644
--- a/code/python/src/utility/subimage.py
+++ b/code/python/src/utility/subimage.py
@@ -83,7 +83,7 @@ def draw_corresponding(src_image_data, tar_image_data, pixel_corresponding_array
 
     # 2) warp src image
     src_warp_image = np.zeros(src_image_data.shape, src_image_data.dtype)
-    pixel_corresponding_array_temp = pixel_corresponding_array.astype(np.int)
+    pixel_corresponding_array_temp = pixel_corresponding_array.astype(int)
     src_y = pixel_corresponding_array_temp[:, 0]
     src_x = pixel_corresponding_array_temp[:, 1]
     tar_y = pixel_corresponding_array_temp[:, 2]
@@ -328,7 +328,7 @@ def erp_ico_proj(erp_image, padding_size, tangent_image_width, corr_downsample_f
             # down-sample the pixel corresponding relationship
             if corr_downsample_factor != 1.0 and pixels_corr_dict_subimage[subimage_index_tar] is not None:
                 corr_number = pixels_corr_dict_subimage[subimage_index_tar].shape[0]
-                corr_index = np.linspace(0, corr_number -1, num = int(corr_number * corr_downsample_factor)).astype(np.int)
+                corr_index = np.linspace(0, corr_number - 1, num=int(corr_number * corr_downsample_factor)).astype(int)
                 corr_index = np.unique(corr_index)
                 pixels_corr_dict_subimage[subimage_index_tar]  = pixels_corr_dict_subimage[subimage_index_tar][corr_index,:]
             
@@ -410,7 +410,7 @@ def erp_ico_draw_corresponding(src_image_data, tar_image_data, pixel_correspondi
 
     # 1) warp src image
     src_warp = np.zeros(src_image_data_np.shape, src_image_data_np.dtype)
-    pixel_corresponding_array_temp = pixel_corresponding_array.astype(np.int)
+    pixel_corresponding_array_temp = pixel_corresponding_array.astype(int)
     src_y = pixel_corresponding_array_temp[:, 0]
     src_x = pixel_corresponding_array_temp[:, 1]
     tar_y = pixel_corresponding_array_temp[:, 2]