CLIP-Notebook.ipynb
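This cell relies on several names defined earlier in the notebook (model, processor, device, data, image_emb, top_k). A minimal setup sketch follows, assuming the Hugging Face transformers CLIP classes and a dataset exposing an "image" column; the checkpoint and dataset names below are placeholders, not necessarily the ones used in this notebook.

import torch
from datasets import load_dataset
from transformers import CLIPModel, CLIPProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# assumed checkpoint; replace with the one actually used in this notebook
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# placeholder dataset: any dataset with a PIL "image" column works
data = load_dataset("your-namespace/your-image-dataset", split="train")

top_k = 5

# precompute L2-normalized embeddings for every image in the corpus
image_emb = []
with torch.no_grad():
    for start in range(0, len(data), 64):
        batch = processor(images=data["image"][start:start + 64], return_tensors="pt").to(device)
        emb = model.get_image_features(**batch)
        image_emb.append(emb / emb.norm(dim=-1, keepdim=True))
image_emb = torch.cat(image_emb)
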
# select a query image from the dataset
query_image = data['image'][0]

# preprocess image
inputs = processor(images=query_image, return_tensors="pt", padding=True).to(device)

# generate the image embedding (no gradients needed at inference time)
with torch.no_grad():
    image_features = model.get_image_features(**inputs)

# L2-normalize the embedding so the dot product below is cosine similarity
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
    
# Calculate similarity scores against all precomputed image embeddings
similarity = torch.mm(image_features, image_emb.T)
    
# Get top-k matches
values, indices = similarity[0].topk(min(top_k, len(data)))
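
The returned indices map back into the dataset; a short usage sketch (note that the first hit is the query image itself, since it is part of the corpus):

# print the top-k matches with their cosine similarity scores
for score, idx in zip(values.tolist(), indices.tolist()):
    print(f"match index={idx}, cosine similarity={score:.4f}")
    # in a notebook, display the matched image with:
    # display(data['image'][idx])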