rag/rag_fr_chat.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "612c8bdb-83a8-4882-96a5-513ac7aedd7b",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/peportier/miniforge3/envs/RAG_ENV/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n",
"/Users/peportier/miniforge3/envs/RAG_ENV/lib/python3.9/site-packages/transformers/utils/generic.py:441: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
" _torch_pytree._register_pytree_node(\n"
]
}
],
"source": [
"import importlib\n",
"import rag\n",
"importlib.reload(rag)\n",
"from rag import RAG\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "98130049-a4de-4532-8454-3df1a13094e7",
"metadata": {
"collapsed": true,
"jupyter": {
"outputs_hidden": true
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-01-04 09:15:27,599 - INFO - Load pretrained SentenceTransformer: intfloat/multilingual-e5-large\n",
"/Users/peportier/miniforge3/envs/RAG_ENV/lib/python3.9/site-packages/transformers/utils/generic.py:309: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
" _torch_pytree._register_pytree_node(\n",
"2024-01-04 09:15:31,253 - INFO - Use pytorch device: cpu\n",
"2024-01-04 09:15:31,257 - INFO - Anonymized telemetry enabled. See https://docs.trychroma.com/telemetry for more information.\n",
"llama_model_loader: loaded meta data with 21 key-value pairs and 291 tensors from /Users/peportier/llm/a/a/zephyr-7b-beta.Q5_K_M.gguf (version GGUF V3 (latest))\n",
"llama_model_loader: - tensor 0: token_embd.weight q5_K [ 4096, 32000, 1, 1 ]\n",
"llama_model_loader: - tensor 1: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 2: blk.0.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 3: blk.0.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 4: blk.0.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 5: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 6: blk.0.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 7: blk.0.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 8: blk.0.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 9: blk.0.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 10: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 11: blk.1.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 12: blk.1.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 13: blk.1.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 14: blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 15: blk.1.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 16: blk.1.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 17: blk.1.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 18: blk.1.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 19: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 20: blk.2.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 21: blk.2.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 22: blk.2.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 23: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 24: blk.2.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 25: blk.2.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 26: blk.2.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 27: blk.2.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 28: blk.3.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 29: blk.3.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 30: blk.3.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 31: blk.3.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 32: blk.3.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 33: blk.3.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 34: blk.3.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 35: blk.3.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 36: blk.3.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 37: blk.4.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 38: blk.4.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 39: blk.4.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 40: blk.4.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 41: blk.4.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 42: blk.4.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 43: blk.4.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 44: blk.4.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 45: blk.4.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 46: blk.5.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 47: blk.5.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 48: blk.5.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 49: blk.5.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 50: blk.5.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 51: blk.5.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 52: blk.5.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 53: blk.5.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 54: blk.5.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 55: blk.6.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 56: blk.6.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 57: blk.6.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 58: blk.6.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 59: blk.6.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 60: blk.6.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 61: blk.6.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 62: blk.6.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 63: blk.6.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 64: blk.7.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 65: blk.7.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 66: blk.7.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 67: blk.7.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 68: blk.7.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 69: blk.7.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 70: blk.7.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 71: blk.7.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 72: blk.7.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 73: blk.8.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 74: blk.8.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 75: blk.8.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 76: blk.8.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 77: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 78: blk.10.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 79: blk.10.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 80: blk.10.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 81: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 82: blk.10.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 83: blk.10.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 84: blk.10.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 85: blk.10.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 86: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 87: blk.11.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 88: blk.11.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 89: blk.11.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 90: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 91: blk.11.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 92: blk.11.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 93: blk.11.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 94: blk.11.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 95: blk.12.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 96: blk.12.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 97: blk.12.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 98: blk.12.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 99: blk.12.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 100: blk.12.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 101: blk.8.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 102: blk.8.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 103: blk.8.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 104: blk.8.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 105: blk.8.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 106: blk.9.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 107: blk.9.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 108: blk.9.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 109: blk.9.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 110: blk.9.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 111: blk.9.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 112: blk.9.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 113: blk.9.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 114: blk.9.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 115: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 116: blk.12.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 117: blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 118: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 119: blk.13.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 120: blk.13.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 121: blk.13.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 122: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 123: blk.13.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 124: blk.13.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 125: blk.13.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 126: blk.13.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 127: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 128: blk.14.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 129: blk.14.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 130: blk.14.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 131: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 132: blk.14.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 133: blk.14.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 134: blk.14.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 135: blk.14.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 136: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 137: blk.15.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 138: blk.15.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 139: blk.15.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 140: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 141: blk.15.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 142: blk.15.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 143: blk.15.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 144: blk.15.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 145: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 146: blk.16.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 147: blk.16.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 148: blk.16.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 149: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 150: blk.16.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 151: blk.16.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 152: blk.16.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 153: blk.16.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 154: blk.17.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 155: blk.17.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 156: blk.17.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 157: blk.17.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 158: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 159: blk.17.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 160: blk.17.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 161: blk.17.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 162: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 163: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 164: blk.18.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 165: blk.18.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 166: blk.18.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 167: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 168: blk.18.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 169: blk.18.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 170: blk.18.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 171: blk.18.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 172: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 173: blk.19.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 174: blk.19.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 175: blk.19.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 176: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 177: blk.19.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 178: blk.19.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 179: blk.19.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 180: blk.19.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 181: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 182: blk.20.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 183: blk.20.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 184: blk.20.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 185: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 186: blk.20.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 187: blk.20.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 188: blk.20.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 189: blk.20.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 190: blk.21.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 191: blk.21.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 192: blk.21.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 193: blk.21.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 194: blk.21.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 195: blk.21.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 196: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 197: blk.21.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 198: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 199: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 200: blk.22.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 201: blk.22.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 202: blk.22.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 203: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 204: blk.22.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 205: blk.22.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 206: blk.22.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 207: blk.22.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 208: blk.23.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 209: blk.23.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 210: blk.23.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 211: blk.23.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 212: blk.23.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 213: blk.23.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 214: blk.23.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 215: blk.23.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 216: blk.23.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 217: blk.24.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 218: blk.24.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 219: blk.24.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 220: blk.24.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 221: blk.24.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 222: blk.24.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 223: blk.24.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 224: blk.24.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 225: blk.24.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 226: blk.25.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 227: blk.25.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 228: blk.25.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 229: blk.25.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 230: blk.25.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 231: blk.25.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 232: blk.25.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 233: blk.25.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 234: blk.25.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 235: blk.26.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 236: blk.26.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 237: blk.26.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 238: blk.26.attn_v.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 239: blk.26.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 240: blk.26.ffn_down.weight q5_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 241: blk.26.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 242: blk.26.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 243: blk.26.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 244: blk.27.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 245: blk.27.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 246: blk.27.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 247: blk.27.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 248: blk.27.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 249: blk.27.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 250: blk.27.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 251: blk.27.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 252: blk.27.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 253: blk.28.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 254: blk.28.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 255: blk.28.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 256: blk.28.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 257: blk.28.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 258: blk.28.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 259: blk.28.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 260: blk.28.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 261: blk.28.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 262: blk.29.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 263: blk.29.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 264: blk.29.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 265: blk.29.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 266: blk.29.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 267: blk.29.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 268: blk.29.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 269: blk.29.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 270: blk.29.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 271: blk.30.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 272: blk.30.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 273: blk.30.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 274: blk.30.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 275: blk.30.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 276: blk.30.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 277: output.weight q6_K [ 4096, 32000, 1, 1 ]\n",
"llama_model_loader: - tensor 278: blk.30.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 279: blk.30.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 280: blk.30.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 281: blk.31.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 282: blk.31.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 283: blk.31.ffn_gate.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 284: blk.31.ffn_up.weight q5_K [ 4096, 14336, 1, 1 ]\n",
"llama_model_loader: - tensor 285: blk.31.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - tensor 286: blk.31.attn_k.weight q5_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 287: blk.31.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 288: blk.31.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]\n",
"llama_model_loader: - tensor 289: blk.31.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n",
"llama_model_loader: - tensor 290: output_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
"llama_model_loader: - kv 0: general.architecture str = llama\n",
"llama_model_loader: - kv 1: general.name str = huggingfaceh4_zephyr-7b-beta\n",
"llama_model_loader: - kv 2: llama.context_length u32 = 32768\n",
"llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n",
"llama_model_loader: - kv 4: llama.block_count u32 = 32\n",
"llama_model_loader: - kv 5: llama.feed_forward_length u32 = 14336\n",
"llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n",
"llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n",
"llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 8\n",
"llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010\n",
"llama_model_loader: - kv 10: llama.rope.freq_base f32 = 10000.000000\n",
"llama_model_loader: - kv 11: general.file_type u32 = 17\n",
"llama_model_loader: - kv 12: tokenizer.ggml.model str = llama\n",
"llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,32000] = [\"<unk>\", \"<s>\", \"</s>\", \"<0x00>\", \"<...\n",
"llama_model_loader: - kv 14: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n",
"llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32 = 1\n",
"llama_model_loader: - kv 17: tokenizer.ggml.eos_token_id u32 = 2\n",
"llama_model_loader: - kv 18: tokenizer.ggml.unknown_token_id u32 = 0\n",
"llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 2\n",
"llama_model_loader: - kv 20: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 65 tensors\n",
"llama_model_loader: - type q5_K: 193 tensors\n",
"llama_model_loader: - type q6_K: 33 tensors\n",
"llm_load_vocab: special tokens definition check successful ( 259/32000 ).\n",
"llm_load_print_meta: format = GGUF V3 (latest)\n",
"llm_load_print_meta: arch = llama\n",
"llm_load_print_meta: vocab type = SPM\n",
"llm_load_print_meta: n_vocab = 32000\n",
"llm_load_print_meta: n_merges = 0\n",
"llm_load_print_meta: n_ctx_train = 32768\n",
"llm_load_print_meta: n_embd = 4096\n",
"llm_load_print_meta: n_head = 32\n",
"llm_load_print_meta: n_head_kv = 8\n",
"llm_load_print_meta: n_layer = 32\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_gqa = 4\n",
"llm_load_print_meta: f_norm_eps = 0.0e+00\n",
"llm_load_print_meta: f_norm_rms_eps = 1.0e-05\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: n_ff = 14336\n",
"llm_load_print_meta: rope scaling = linear\n",
"llm_load_print_meta: freq_base_train = 10000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_yarn_orig_ctx = 32768\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: model type = 7B\n",
"llm_load_print_meta: model ftype = mostly Q5_K - Medium\n",
"llm_load_print_meta: model params = 7.24 B\n",
"llm_load_print_meta: model size = 4.78 GiB (5.67 BPW) \n",
"llm_load_print_meta: general.name = huggingfaceh4_zephyr-7b-beta\n",
"llm_load_print_meta: BOS token = 1 '<s>'\n",
"llm_load_print_meta: EOS token = 2 '</s>'\n",
"llm_load_print_meta: UNK token = 0 '<unk>'\n",
"llm_load_print_meta: PAD token = 2 '</s>'\n",
"llm_load_print_meta: LF token = 13 '<0x0A>'\n",
"llm_load_tensors: ggml ctx size = 0.11 MiB\n",
"llm_load_tensors: mem required = 4893.10 MiB\n",
"...................................................................................................\n",
"llama_new_context_with_model: n_ctx = 4096\n",
"llama_new_context_with_model: freq_base = 10000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_new_context_with_model: kv self size = 512.00 MiB\n",
"llama_build_graph: non-view tensors processed: 740/740\n",
"ggml_metal_init: allocating\n",
"ggml_metal_init: found device: Apple M2 Max\n",
"ggml_metal_init: picking default device: Apple M2 Max\n",
"ggml_metal_init: default.metallib not found, loading from source\n",
"ggml_metal_init: loading '/Users/peportier/miniforge3/envs/RAG_ENV/lib/python3.9/site-packages/llama_cpp/ggml-metal.metal'\n",
"ggml_metal_init: GPU name: Apple M2 Max\n",
"ggml_metal_init: GPU family: MTLGPUFamilyApple8 (1008)\n",
"ggml_metal_init: hasUnifiedMemory = true\n",
"ggml_metal_init: recommendedMaxWorkingSetSize = 49152.00 MiB\n",
"ggml_metal_init: maxTransferRate = built-in GPU\n",
"llama_new_context_with_model: compute buffer total size = 291.07 MiB\n",
"llama_new_context_with_model: max tensor size = 102.54 MiB\n",
"ggml_metal_add_buffer: allocated 'data ' buffer, size = 4893.70 MiB, ( 4894.33 / 49152.00)\n",
"ggml_metal_add_buffer: allocated 'kv ' buffer, size = 512.02 MiB, ( 5406.34 / 49152.00)\n",
"ggml_metal_add_buffer: allocated 'alloc ' buffer, size = 288.02 MiB, ( 5694.36 / 49152.00)\n",
"AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | \n"
]
}
],
"source": [
"llm_model_path = '/Users/peportier/llm/a/a/zephyr-7b-beta.Q5_K_M.gguf'\n",
"embed_model_name = 'intfloat/multilingual-e5-large'\n",
"collection_name = 'cera'\n",
"chromadb_path = './chromadb'\n",
"\n",
"rag = RAG(llm_model_path, embed_model_name, collection_name, chromadb_path)"
]
},
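{
"cell_type": "markdown",
"id": "3c5a1f20-9b7e-4d2a-8c4e-2f1a6b9d0e11",
"metadata": {},
"source": [
"The `RAG` class comes from the local `rag.py` module, which is not shown in this notebook. Judging from the load logs above, it wires together a `SentenceTransformer` embedder, a persistent Chroma collection, and a `llama-cpp-python` model with a 4096-token context. The cell below is only a minimal sketch of that presumed wiring; every internal name in it is an assumption, not the actual `rag.py` implementation."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7d9e2b44-5a1c-4f6e-9b3d-8e0c4a2f1b22",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch of what RAG.__init__ presumably sets up (assumed, not taken from rag.py)\n",
"from sentence_transformers import SentenceTransformer\n",
"import chromadb\n",
"from llama_cpp import Llama\n",
"\n",
"class RAGSketch:\n",
"    def __init__(self, llm_model_path, embed_model_name, collection_name, chromadb_path):\n",
"        # Embedder reported in the logs: intfloat/multilingual-e5-large on CPU\n",
"        self.embedder = SentenceTransformer(embed_model_name)\n",
"        # Persistent Chroma store holding the pre-indexed documents\n",
"        client = chromadb.PersistentClient(path=chromadb_path)\n",
"        self.collection = client.get_or_create_collection(collection_name)\n",
"        # zephyr-7b-beta GGUF; n_ctx=4096 matches llama_new_context_with_model above\n",
"        self.llm = Llama(model_path=llm_model_path, n_ctx=4096)"
]
},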
{
"cell_type": "code",
"execution_count": 3,
"id": "b12ed9e5-cacc-4f9b-a6b9-38ccda00764f",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Batches: 100%|████████████████████████████████████| 1/1 [00:00<00:00, 2.19it/s]\n"
]
}
],
"source": [
"query1 = \"Comment la Caisse d'Epargne Rhône-Alpes peut-elle aider une entreprise qui rencontre des problèmes de trésorerie ?\"\n",
"res1 = rag.chat(query1, stream=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "086aef56-223f-4d3b-a1f0-9d251095e9f9",
"metadata": {},
"outputs": [],
"source": [
"res1"
]
},
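{
"cell_type": "markdown",
"id": "b4f8c1d2-6e3a-4b9f-a7c5-1d2e3f4a5b66",
"metadata": {},
"source": [
"`Markdown` and `display` were imported in the first cell but not used so far. Assuming `res1` is the complete answer as a plain string (which the non-streaming `stream=False` call suggests), it can be rendered directly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c9a7e5f3-2b1d-4c8a-9e6f-3a4b5c6d7e88",
"metadata": {},
"outputs": [],
"source": [
"# Render the non-streamed answer as Markdown (assumes res1 is a str)\n",
"display(Markdown(res1))"
]
},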
{
"cell_type": "code",
"execution_count": null,
"id": "5d06fe9f-bb7e-41ec-b919-159dfa7d7e67",
"metadata": {},
"outputs": [],
"source": [
"query = \"Comment la Caisse d'Epargne Rhône-Alpes peut-elle aider une entreprise qui rencontre des problèmes de trésorerie ?\"\n",
"ans = rag.chat(query)\n",
"for item in ans:\n",
" print(item, end='')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f21824a-f859-4048-b745-365703a3749a",
"metadata": {},
"outputs": [],
"source": [
"query = \"J'aimerais en savoir plus au sujet de l'affacturage.\"\n",
"ans = rag.chat(query)\n",
"for item in ans:\n",
" print(item, end='')"
]
},
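{
"cell_type": "markdown",
"id": "e2d4f6a8-1c3b-4e5d-8f7a-9b0c1d2e3f44",
"metadata": {},
"source": [
"The same streaming pattern can also accumulate the chunks so the full answer stays available after printing. A small sketch, assuming each item yielded by `rag.chat` is a `str`; the follow-up question (\"What are the advantages of factoring?\") is invented for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1a3b5c7-9d8e-4a2b-b6c4-5e6f7a8b9c00",
"metadata": {},
"outputs": [],
"source": [
"# Stream the answer to the console while keeping the complete text (assumes str chunks)\n",
"followup = \"Quels sont les avantages de l'affacturage ?\"  # hypothetical follow-up question\n",
"chunks = []\n",
"for item in rag.chat(followup):\n",
"    print(item, end='')\n",
"    chunks.append(item)\n",
"full_answer = ''.join(chunks)"
]
},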
{
"cell_type": "code",
"execution_count": null,
"id": "57fc4d50-2c12-49b3-9ee6-7d6bcf24a7ab",
"metadata": {},
"outputs": [],
"source": [
"rag.reset_history()\n",
"query = \"Je souhaite rénover ma maison. Comment la Caisse d'Epargne Rhône-Alpes peut-elle m'aider ?\"\n",
"ans = rag.chat(query)\n",
"for item in ans:\n",
" print(item, end='')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "RAG_ENV",
"language": "python",
"name": "rag_env"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 5
}