@@ -18,11 +18,9 @@ To install the FastEmbed library, pip works best. You can install it with or wit

```bash
pip install fastembed
- ```

- ### ⚡️ With GPU
+ # or with GPU support

- ```bash
pip install fastembed-gpu
```

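FastEmbed runs its models through ONNX Runtime, so a quick way to check which build is active is to list the available execution providers. A minimal sanity-check sketch, assuming only the standard `onnxruntime` API (`get_available_providers` belongs to ONNX Runtime, not FastEmbed):

```python
import onnxruntime

# The CPU package ships with "CPUExecutionProvider"; with fastembed-gpu (and a working
# CUDA setup) "CUDAExecutionProvider" should show up in this list as well.
print(onnxruntime.get_available_providers())
```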
@@ -48,7 +46,99 @@ embeddings_list = list(embedding_model.embed(documents))
len(embeddings_list[0])  # Vector of 384 dimensions
```

- ### ⚡️ FastEmbed on a GPU
+ FastEmbed supports a variety of models for different tasks and modalities.
+ The list of all available models can be found [here](https://qdrant.github.io/fastembed/examples/Supported_Models/).
+ ### 🎒 Dense text embeddings
+
+ ```python
+ from fastembed import TextEmbedding
+
+ model = TextEmbedding(model_name="BAAI/bge-small-en-v1.5")
+ embeddings = list(model.embed(documents))
+
+ # [
+ #   array([-0.1115,  0.0097,  0.0052,  0.0195, ...], dtype=float32),
+ #   array([-0.1019,  0.0635, -0.0332,  0.0522, ...], dtype=float32)
+ # ]
+ ```
+
+ ### 🔱 Sparse text embeddings
+
+ * SPLADE++
+
+ ```python
+ from fastembed import SparseTextEmbedding
+
+ model = SparseTextEmbedding(model_name="prithivida/Splade_PP_en_v1")
+ embeddings = list(model.embed(documents))
+
+ # [
+ #   SparseEmbedding(indices=[17, 123, 919, ...], values=[0.71, 0.22, 0.39, ...]),
+ #   SparseEmbedding(indices=[38, 12, 91, ...], values=[0.11, 0.22, 0.39, ...])
+ # ]
+ ```
+
+ <!--
+ * BM42 - ([link](ToDo))
+
+ ```python
+ from fastembed import SparseTextEmbedding
+
+ model = SparseTextEmbedding(model_name="Qdrant/bm42-all-minilm-l6-v2-attentions")
+ embeddings = list(model.embed(documents))
+
+ # [
+ #   SparseEmbedding(indices=[17, 123, 919, ...], values=[0.71, 0.22, 0.39, ...]),
+ #   SparseEmbedding(indices=[38, 12, 91, ...], values=[0.11, 0.22, 0.39, ...])
+ # ]
+ ```
+ -->
+
+ ### 🦥 Late interaction models (aka ColBERT)
+
+ ```python
+ from fastembed import LateInteractionTextEmbedding
+
+ model = LateInteractionTextEmbedding(model_name="colbert-ir/colbertv2.0")
+ embeddings = list(model.embed(documents))
+
+ # [
+ #   array([
+ #     [-0.1115,  0.0097,  0.0052,  0.0195, ...],
+ #     [-0.1019,  0.0635, -0.0332,  0.0522, ...],
+ #   ]),
+ #   array([
+ #     [-0.9019,  0.0335, -0.0032,  0.0991, ...],
+ #     [-0.2115,  0.8097,  0.1052,  0.0195, ...],
+ #   ]),
+ # ]
+ ```
+
+ ### 🖼️ Image embeddings
+
+ ```python
+ from fastembed import ImageEmbedding
+
+ images = [
+     "./path/to/image1.jpg",
+     "./path/to/image2.jpg",
+ ]
+
+ model = ImageEmbedding(model_name="Qdrant/clip-ViT-B-32-vision")
+ embeddings = list(model.embed(images))
+
+ # [
+ #   array([-0.1115,  0.0097,  0.0052,  0.0195, ...], dtype=float32),
+ #   array([-0.1019,  0.0635, -0.0332,  0.0522, ...], dtype=float32)
+ # ]
+ ```
+
+ ## ⚡️ FastEmbed on a GPU

FastEmbed supports running on GPU devices.
It requires installation of the `fastembed-gpu` package.
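Once `fastembed-gpu` is installed, the model classes can be pointed at the GPU. A minimal sketch, assuming the embedding classes accept an ONNX Runtime-style `providers` argument (the exact argument name may differ between FastEmbed versions, so check the GPU example in the docs):

```python
from fastembed import TextEmbedding

# Illustrative GPU selection via an ONNX Runtime execution provider list;
# treat the `providers` argument as an assumption rather than the definitive API.
model = TextEmbedding(
    model_name="BAAI/bge-small-en-v1.5",
    providers=["CUDAExecutionProvider"],
)
embeddings = list(model.embed(["FastEmbed runs this model on the GPU."]))
```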