diff --git a/dino/eval_copy_detection.py b/dino/eval_copy_detection.py
index 73dcd507893f204a47a5036cc61bd65b30cf1ead..9bfe79e081a149790a1126119bd055e92ea6e255 100644
--- a/dino/eval_copy_detection.py
+++ b/dino/eval_copy_detection.py
@@ -25,9 +25,9 @@ from torchvision import transforms as pth_transforms
 from PIL import Image, ImageFile
 import numpy as np
 
-import utils
-import vision_transformer as vits
-from eval_knn import extract_features
+import dino.utils as utils
+import dino.vision_transformer as vits
+from dino.eval_knn import extract_features
 
 
 class CopydaysDataset():
diff --git a/dino/eval_image_retrieval.py b/dino/eval_image_retrieval.py
index 999f8c9009a9abcc28308c5995c286f65b1522ac..612f4188ee5b3bc0cde38d2ca73b3caceae4e511 100644
--- a/dino/eval_image_retrieval.py
+++ b/dino/eval_image_retrieval.py
@@ -25,9 +25,9 @@ from torchvision import transforms as pth_transforms
 from PIL import Image, ImageFile
 import numpy as np
 
-import utils
-import vision_transformer as vits
-from eval_knn import extract_features
+import dino.utils as utils
+import dino.vision_transformer as vits
+from dino.eval_knn import extract_features
 
 
 class OxfordParisDataset(torch.utils.data.Dataset):
diff --git a/dino/eval_knn.py b/dino/eval_knn.py
index fe99a26049cda2d764086727223e6cc9a8f2bfb8..53a776b31035eddf48a3dbdd90d6c89b1784b588 100644
--- a/dino/eval_knn.py
+++ b/dino/eval_knn.py
@@ -23,8 +23,8 @@ from torchvision import datasets
 from torchvision import transforms as pth_transforms
 from torchvision import models as torchvision_models
 
-import utils
-import vision_transformer as vits
+import dino.utils as utils
+import dino.vision_transformer as vits
 
 
 def extract_feature_pipeline(args):
diff --git a/dino/eval_linear.py b/dino/eval_linear.py
index cdef16b473d216889b493aa0c7a63e15f945092c..ada7529e8da25088d83e425445d84d6b606cf6e0 100644
--- a/dino/eval_linear.py
+++ b/dino/eval_linear.py
@@ -24,8 +24,8 @@ from torchvision import datasets
 from torchvision import transforms as pth_transforms
 from torchvision import models as torchvision_models
 
-import utils
-import vision_transformer as vits
+import dino.utils as utils
+import dino.vision_transformer as vits
 
 
 def eval_linear(args):
diff --git a/dino/eval_video_segmentation.py b/dino/eval_video_segmentation.py
index 08a18c475db9cbadb29d2e0f22113c0cc9efed49..e851971d57a5954bd5ab5074ca279770edf2dec9 100644
--- a/dino/eval_video_segmentation.py
+++ b/dino/eval_video_segmentation.py
@@ -30,8 +30,8 @@ from torch.nn import functional as F
 from PIL import Image
 from torchvision import transforms
 
-import utils
-import vision_transformer as vits
+import dino.utils as utils
+import dino.vision_transformer as vits
 
 
 @torch.no_grad()
diff --git a/dino/main_dino.py b/dino/main_dino.py
index cade9873dcb1d1c6c69ddba61dbdcf3f01dd7540..317925d42af4e4d2993f0ddfe0cf42e8624ed3de 100644
--- a/dino/main_dino.py
+++ b/dino/main_dino.py
@@ -30,9 +30,9 @@ import torch.nn.functional as F
 from torchvision import datasets, transforms
 from torchvision import models as torchvision_models
 
-import utils
-import vision_transformer as vits
-from vision_transformer import DINOHead
+import dino.utils as utils
+import dino.vision_transformer as vits
+from dino.vision_transformer import DINOHead
 
 torchvision_archs = sorted(name for name in torchvision_models.__dict__
     if name.islower() and not name.startswith("__")
diff --git a/dino/run_with_submitit.py b/dino/run_with_submitit.py
index 33d4116f2ff512b39d0cec5c936f999df1ac80fe..9a4696ba5379edeebe8d871c7f8de35ec10ce95d 100644
--- a/dino/run_with_submitit.py
+++ b/dino/run_with_submitit.py
@@ -20,7 +20,7 @@ import os
 import uuid
 from pathlib import Path
 
-import main_dino
+import dino.main_dino as main_dino
 import submitit
 
 
diff --git a/dino/video_generation.py b/dino/video_generation.py
index 94da9836ad0e9bd8dccf0f989b93a93ed11cfd7e..f90ae86641ff280a37cfd882063a1385845d6e29 100644
--- a/dino/video_generation.py
+++ b/dino/video_generation.py
@@ -26,8 +26,8 @@ from torchvision import transforms as pth_transforms
 import numpy as np
 from PIL import Image
 
-import utils
-import vision_transformer as vits
+import dino.utils as utils
+import dino.vision_transformer as vits
 
 
 FOURCC = {
diff --git a/dino/vision_transformer.py b/dino/vision_transformer.py
index f69a7ad0522500ca2a85305a789be5ca6ac474d0..4ad80046902931e5b96e9b23ff73fba8a9b44b87 100644
--- a/dino/vision_transformer.py
+++ b/dino/vision_transformer.py
@@ -21,7 +21,7 @@ from functools import partial
 import torch
 import torch.nn as nn
 
-from utils import trunc_normal_
+from dino.utils import trunc_normal_
 
 
 def drop_path(x, drop_prob: float = 0., training: bool = False):
diff --git a/dino/visualize_attention.py b/dino/visualize_attention.py
index 4288265b9b8865bebfcaad1d350a114da35ff055..09891e6dea9bba5cc60aa6493a0a76805634db2b 100644
--- a/dino/visualize_attention.py
+++ b/dino/visualize_attention.py
@@ -31,8 +31,8 @@ from torchvision import transforms as pth_transforms
 import numpy as np
 from PIL import Image
 
-import utils
-import vision_transformer as vits
+import dino.utils as utils
+import dino.vision_transformer as vits
 
 
 def apply_mask(image, mask, color, alpha=0.5):