image_searcher.py
import pickle

import clip
import torch


class ImageSearcher:
    """Search a precomputed image-embedding index with CLIP text queries."""

    def __init__(self):
        # Use the GPU when available, otherwise fall back to the CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model, self.preprocess = clip.load("ViT-B/32", device=self.device)
        self.embeddings, self.paths = self.load_embeddings()

    def load_embeddings(self):
        # Load the precomputed image embeddings and their file paths from disk.
        with open("image_embeddings.pkl", "rb") as f:
            data = pickle.load(f)
        return data["embeddings"], data["paths"]

    def find_similar_images(self, text_description, number_of_images):
        # Encode the query text with CLIP.
        text = clip.tokenize([text_description]).to(self.device)
        with torch.no_grad():
            text_features = self.model.encode_text(text)
            # L2-normalise both sides so the dot product is cosine similarity.
            embeddings_norm = self.embeddings / self.embeddings.norm(dim=1, keepdim=True)
            text_features_norm = text_features / text_features.norm(dim=1, keepdim=True)
            similarities = (text_features_norm @ embeddings_norm.T).squeeze(0)
            # Indices of the highest-scoring images, best match first.
            best_indices = similarities.argsort(descending=True)[:number_of_images]
            # Rescale cosine similarity from [-1, 1] to [0, 1] for display.
            adjusted_similarities = (similarities + 1) / 2
        return [
            (self.paths[i], adjusted_similarities[i].item(), self.embeddings[i])
            for i in best_indices
        ], text_features
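

# A minimal usage sketch, not part of the original file: it assumes
# image_embeddings.pkl already exists next to this script and stores a torch
# tensor under "embeddings" plus a matching list of file paths under "paths",
# as load_embeddings expects. The query string and the top-5 cutoff are
# illustrative choices only.
if __name__ == "__main__":
    searcher = ImageSearcher()
    results, query_features = searcher.find_similar_images("a dog playing in the snow", 5)
    for path, score, _embedding in results:
        # score is the cosine similarity rescaled to the [0, 1] range.
        print(f"{score:.3f}  {path}")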