From 97bc78a6efaa870f950790ae1925b2136ceb5e76 Mon Sep 17 00:00:00 2001 From: Richie Cahill Date: Mon, 13 Apr 2026 15:43:01 -0400 Subject: [PATCH] start --- __init__.py | 1 + config.py | 89 +++++++ prompt_bench/Dockerfile.finetune | 25 ++ prompt_bench/__init__.py | 1 + .../__pycache__/__init__.cpython-314.pyc | Bin 0 -> 223 bytes .../batch_bill_summarizer.cpython-314.pyc | Bin 0 -> 13495 bytes .../batch_compresion_test.cpython-314.pyc | Bin 0 -> 13486 bytes .../__pycache__/batch_openai.cpython-314.pyc | Bin 0 -> 13591 bytes .../bill_token_compression.cpython-314.pyc | Bin 0 -> 7734 bytes .../build_finetune_dataset.cpython-314.pyc | Bin 0 -> 6320 bytes .../__pycache__/compression.cpython-314.pyc | Bin 0 -> 3884 bytes .../__pycache__/container.cpython-314.pyc | Bin 0 -> 4144 bytes .../__pycache__/downloader.cpython-314.pyc | Bin 0 -> 4501 bytes .../finetune_container.cpython-314.pyc | Bin 0 -> 10114 bytes prompt_bench/__pycache__/main.cpython-314.pyc | Bin 0 -> 10303 bytes .../__pycache__/models.cpython-314.pyc | Bin 0 -> 1023 bytes .../summarization_prompts.cpython-314.pyc | Bin 0 -> 2025 bytes .../__pycache__/vllm_client.cpython-314.pyc | Bin 0 -> 4687 bytes .../vllm_container.cpython-314.pyc | Bin 0 -> 3102 bytes prompt_bench/batch_bill_summarizer.py | 238 +++++++++++++++++ prompt_bench/bill_token_compression.py | 162 ++++++++++++ prompt_bench/compresion_test.py | 241 ++++++++++++++++++ prompt_bench/containers/__init__.py | 1 + .../__pycache__/__init__.cpython-314.pyc | Bin 0 -> 234 bytes .../__pycache__/finetune.cpython-314.pyc | Bin 0 -> 8029 bytes .../__pycache__/lib.cpython-314.pyc | Bin 0 -> 1450 bytes prompt_bench/containers/finetune.py | 165 ++++++++++++ prompt_bench/containers/lib.py | 23 ++ prompt_bench/containers/vllm.py | 70 +++++ prompt_bench/downloader.py | 75 ++++++ prompt_bench/finetune.py | 214 ++++++++++++++++ prompt_bench/input/1.txt | 1 + prompt_bench/input/2.txt | 1 + prompt_bench/input/3.txt | 1 + prompt_bench/input/4.txt | 1 + 
prompt_bench/main.py | 215 ++++++++++++++++ prompt_bench/models.py | 30 +++ prompt_bench/summarization_prompts.py | 34 +++ prompt_bench/tools/build_finetune_dataset.py | 114 +++++++++ prompt_bench/tools/count_tokens.py | 97 +++++++ prompt_bench/vllm_client.py | 68 +++++ pyprject.toml | 0 42 files changed, 1867 insertions(+) create mode 100644 __init__.py create mode 100644 config.py create mode 100644 prompt_bench/Dockerfile.finetune create mode 100644 prompt_bench/__init__.py create mode 100644 prompt_bench/__pycache__/__init__.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/batch_bill_summarizer.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/batch_compresion_test.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/batch_openai.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/bill_token_compression.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/build_finetune_dataset.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/compression.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/container.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/downloader.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/finetune_container.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/main.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/models.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/summarization_prompts.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/vllm_client.cpython-314.pyc create mode 100644 prompt_bench/__pycache__/vllm_container.cpython-314.pyc create mode 100644 prompt_bench/batch_bill_summarizer.py create mode 100644 prompt_bench/bill_token_compression.py create mode 100644 prompt_bench/compresion_test.py create mode 100644 prompt_bench/containers/__init__.py create mode 100644 prompt_bench/containers/__pycache__/__init__.cpython-314.pyc create mode 100644 prompt_bench/containers/__pycache__/finetune.cpython-314.pyc create mode 
100644 prompt_bench/containers/__pycache__/lib.cpython-314.pyc create mode 100644 prompt_bench/containers/finetune.py create mode 100644 prompt_bench/containers/lib.py create mode 100644 prompt_bench/containers/vllm.py create mode 100644 prompt_bench/downloader.py create mode 100644 prompt_bench/finetune.py create mode 100644 prompt_bench/input/1.txt create mode 100644 prompt_bench/input/2.txt create mode 100644 prompt_bench/input/3.txt create mode 100644 prompt_bench/input/4.txt create mode 100644 prompt_bench/main.py create mode 100644 prompt_bench/models.py create mode 100644 prompt_bench/summarization_prompts.py create mode 100644 prompt_bench/tools/build_finetune_dataset.py create mode 100644 prompt_bench/tools/count_tokens.py create mode 100644 prompt_bench/vllm_client.py create mode 100644 pyprject.toml diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..525291c --- /dev/null +++ b/__init__.py @@ -0,0 +1 @@ +"""init.""" diff --git a/config.py b/config.py new file mode 100644 index 0000000..b70df9b --- /dev/null +++ b/config.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path +import tomllib + + +@dataclass +class LoraConfig: + """LoRA adapter hyperparameters.""" + + rank: int + alpha: int + dropout: float + targets: list[str] + + +@dataclass +class TrainingConfig: + """Training loop hyperparameters.""" + + learning_rate: float + epochs: int + batch_size: int + gradient_accumulation: int + max_seq_length: int + warmup_ratio: float + weight_decay: float + logging_steps: int + save_steps: int + + +@dataclass +class FinetuneConfig: + """Top-level finetune configuration.""" + + base_model: str + lora: LoraConfig + training: TrainingConfig + + @classmethod + def from_toml(cls, config_path: Path) -> FinetuneConfig: + """Load finetune config from a TOML file.""" + raw = tomllib.loads(config_path.read_text())["finetune"] + return cls( + base_model=raw["base_model"], + 
lora=LoraConfig(**raw["lora"]), + training=TrainingConfig(**raw["training"]), + ) + + +class BenchmarkConfig: + """Top-level benchmark configuration loaded from TOML.""" + + models: list[str] + model_dir: str + port: int + gpu_memory_utilization: float + temperature: float + timeout: int + concurrency: int + vllm_startup_timeout: int + + @classmethod + def from_toml(cls, config_path: Path) -> BenchmarkConfig: + """Load benchmark config from a TOML file.""" + raw = tomllib.loads(config_path.read_text())["bench"] + return cls(**raw) + + +def get_config_dir() -> Path: + """Get the path to the config file.""" + return Path(__file__).resolve().parent.parent.parent / "config" + +def default_config_path() -> Path: + """Get the path to the config file.""" + return get_config_dir() / "config.toml" + + +def get_finetune_config(config_path: Path | None = None) -> FinetuneConfig: + if config_path is None: + config_path = default_config_path() + return FinetuneConfig.from_toml(config_path) + + +def get_benchmark_config(config_path: Path | None = None) -> BenchmarkConfig: + if config_path is None: + config_path = default_config_path() + return BenchmarkConfig.from_toml(config_path) diff --git a/prompt_bench/Dockerfile.finetune b/prompt_bench/Dockerfile.finetune new file mode 100644 index 0000000..4209526 --- /dev/null +++ b/prompt_bench/Dockerfile.finetune @@ -0,0 +1,25 @@ +# Unsloth fine-tuning container for Qwen 3.5 4B on RTX 3090. +# +# Build: +# docker build -f python/prompt_bench/Dockerfile.finetune -t bill-finetune . 
+# +# Run: +# docker run --rm --device=nvidia.com/gpu=all --ipc=host \ +# -v $(pwd)/output:/workspace/output \ +# -v $(pwd)/output/finetune_dataset.jsonl:/workspace/dataset.jsonl:ro \ +# -v /zfs/models/hf:/models \ +# bill-finetune \ +# --dataset /workspace/dataset.jsonl \ +# --output-dir /workspace/output/qwen-bill-summarizer + +FROM ghcr.io/unslothai/unsloth:latest + +RUN pip install --no-cache-dir typer + +WORKDIR /workspace +COPY python/prompt_bench/finetune.py python/prompt_bench/finetune.py +COPY config/prompts/summarization_prompts.toml config/prompts/summarization_prompts.toml +COPY python/prompt_bench/__init__.py python/prompt_bench/__init__.py +COPY python/__init__.py python/__init__.py + +ENTRYPOINT ["python", "-m", "python.prompt_bench.finetune"] diff --git a/prompt_bench/__init__.py b/prompt_bench/__init__.py new file mode 100644 index 0000000..dc58a44 --- /dev/null +++ b/prompt_bench/__init__.py @@ -0,0 +1 @@ +"""Prompt benchmarking system for evaluating LLMs via vLLM.""" diff --git a/prompt_bench/__pycache__/__init__.cpython-314.pyc b/prompt_bench/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43755bbbb37553a0babf894a80e433ace51f48b7 GIT binary patch literal 223 zcmdPq&ryk0@&Ee@O9{FKt1RJ$TJ1_lO@TZ=gu7#KbU>5&!@I literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/batch_bill_summarizer.cpython-314.pyc b/prompt_bench/__pycache__/batch_bill_summarizer.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27f0263c45621f3d0e05e3e0728c57403c08819e GIT binary patch literal 13495 zcmdPq*jy^*<4k%8echy%l{5C-Gt4n_us28JMp5{5X2AVyC{52g|(c@Ue) zlevUBjv50MfPFq^|u)I+R9%tO3HT%IA8 zHNXJkQe;w*Ay(3a5h79|6~tL09i$w@CC(7UEzXc5ki%Fa6T~CV5X1}OmBQPa4Jxl8Ivok~L-xk`reLk_V~JVJuMzQV?edQUvizRAVKBlq@0`7*eR+{B{Hs>G7a{5*xM{3M0^veY7l4F0P=|#FS!%l8jV^ zw9KO75(Pi7{$ho+qWoNiLr8A&(njk<00~?6(tEe3Q47zIVr^oi3(o9{(e3R zX_+~x3i)YVs6Hr4Ei6qfE-BVgNX$!7C<^opr_!NQ=G3*UX)pq3UUL;zxinj1*t{4 
zAU7%$6s77H0W@QFV_Locy3=Fq8l8ejY3ld8*idaBgR&bcyV$RGfxy4qL zT2fk+_e(Sq_iL>HM+Q@NJpW#q$oCaB?Bm!((;RP6HAIg&QQALq@R(Wo2p-w znVgZCs-Kcy0!j+S`URCG8Tono1x5L}1tsxGsd>p6`bnVF4hbSyI!P_kE2zB16CV$a ziq!b{B2fkg21N!2hGHeKx4tt7GYWnL5i>+SfT#t@VCsUAJ6QBHC@7La{sJ*Tm>m)> zteAkHuj9tI<3U&avj0?sg| z0AZM&2s(V4G2DK6hA_rB28$^S43O~i-pjy%&7EP47JJ}o&NDE`Gx#!bGRQFmae<%- zGsJXxhA<{&1_cIf2AIpznLsYfWRho)WsnEM5GG{?Yi3!7Oh!%as)8VJUQ^Hj6Aklo@z zt}AY_LtS-?DKn*tr6|8#_ZClCYEf}!KB#&~Nxj9W>!-hM45^^oi*+GB1m1 zTo((tAQrH~dA<8e_YE$qeIfiEB}bf3upah0U~|DXpu_!!wA_62ndS@ZE=wC-m$tYd zZLuM#!{@%a^9=JDnhOjU#4PZ;C}Dg;yuEmSd$Axn#j9YupW`%1(8}TAku={m6H>tW&)L{;F<{( zHw_FpYbGXrCS?XqmLeWd9I=)q=9H%1VlFNzDiQ!itPleOgC=7UC=){}8*p5L>jyAl z4yq(TNuYt@I|CD=H{(YzIYamZm|h?Z;TMT9Fff3f2g#=l44^hLGXn$b=M-=agH$*} zi*T4i5Ut1nD&E5wVA(tXq#GoGgoEHk3YNkORPnM@+4v@Arz#|Z>e0-+bcN!?v{Z$( z{33L-;bc?R9Ri3*24@BrmXdAp#O)GRXTJkeZh9 zvj#XIamRBIOAr&d6~Y2)g+St)6YjqvPz801H8(#cHRl#5DAUJ-iXw0-xW$r^nOsst zC@p}JZ!yFb-x+ur**=1Z8NnYw)CyrRwF5#O5CO|UJOeGdL4IKXS%H))f*8XX5Y49` z_AtgEjv&q;t{`qideLVJ;tb*m;*No*n*dPK2OEbV6dA&pf_TlLfu_%-$E44s4+;pr zDitSCJ6a(>FBR4$fJ8mC<*WcIq4kPDp1Z}Cn_7~QpK^;Oz&|+T7ISG)&My&ct>arP zN%<+2MYfqvF6sDPh)KPo zlXgKM{R1nrB;N;CHbJfroZLKY9~ih<*&xvd3Q7f-U$|ye}0=Kck7_ld7SVF_nWde1z@Frv)h9D**rXc1pMr_@-Fs2|D zV`hCuaHYc-#BL5XtAII(0|XHXHG&yimkv}TfijOgLl|=aH;9FbVe&zoI#LK3LP_4o z9Gc*{s+>a$Ky6ag5tNdA@SqNKbjU3;C$$(nIs_U%fi&Y4GE0gfqb->!dbgO0lB(GB zLDhQkEfx?}C74)Hkdv7V9@Nm!D$dW#xy4>kT2zo0!vY#MxyA1v;Ogh-8Sfb28Sms6?0Sm@)XumiP?VTioEo2&UldhVG^|9c+sFTFB(+f#p-~|KL|UPU=B?HS_O%>!pyJ%$2iJ%$37 zFa`kzx0MVG@n~LGeWk2gP=+9{y*F#&JdOW(AW+l zzkyjnj3KN+Oi1Rz-4Mi#Bnme#h$VzQh&6;Wh%JmA)>BX61ohOzr|W=H1lUvt24BV? 
zc73>6@(eMcLJcez1|J(K;0j~L=B609IpGV#IADbh%qQ{;-m4+CX1Iwls0YMRbq&%G z)PT08;=#3(jskehG9J{G*Mto2f*QxgdR$ztpix^82h{tA4L_!)C?ut(8mLDDZh~rZLoy$z{acg`Qp^kW zI3!GOaX}5tFE0kW4&3&=#SeEyJa|;)7C*96QY%Vsi6IL>2gytGN^Y@(yH=Sg;KD>r zMFl$Upa5x72ow~h79!kWp&MU@N;4B(8r7o2fFNH8cWH~8L=m2dF6t*Ca9L$-tc zj+E?-^!a%+^H#)Mmb1DnW!=GlM@nZ!%w;L74*oleDhtAw$1RN8;D1@w<+7sd1rE6z z975MQBrkGEJ`j+cVSYnEdQR{Hnai?zHw2WHIIrNptYUFPKxRh%4FTl~s@69IWY5Q6 zP%yiu;Bg@-?E^16FW(ntPEM{bA`HR`3)n9UX>_pP;1IpeA#;&KW=`x1^Bwk=WxcO) z_cO}y;COv~9eNcJ=B@#7gj^?yUPt8lMC@8YKr3C3OfIS$WTwJDSlarsEm{V-0 z2UpVt(heGcddbkha6#B+qtpuJ89diTR4<9BZkGD_8Pq~*5*25pvNsCg-YBXAxd6!n zMGc_f1Z9(Mkc>D30|VG?QeHc&b}(O%H@Yrud`a4Pch%3LCQ$Q^4N^+xf^_A92v86e zfh;I0gvf(BT}7bD8psGQIQoljK{`_zso)`7PH;$p>aknQ#ihBLY>*re&g|eeBO62k zq^bbNK1x*q7AXc5FQDe60yHZ4!J1%Gmc?a7ARmEzR@@9OTM>OWrWN6!UYqF^PSYJ? z&@tQ_JVLj`OTVx(3vqR@ec)i=LrjQ2a zTpK42)rdFj<1wg4n?gOb$o`6Wrtm4Mg4I&CDw(Ede*G<1Gz ztORw@^r|=@Q;>b-c zNd$GViuyqLgb~(T0u4GBrGnxk9Yi4ap+LriRTpJ|q5@PnsHwpU9-d^d`{Ti`FPg%@ zzyOXf&^Sc{!vlWN{_4)^JKRzixTI%TT;Nc7z#}~)_6CpG1qrCX%t0&EQ)AB3=WmoBhKf#bIdJS1DJmt0&{C7fScQczl=51P

YB0O-d>dn z!ff!=If^SGj)V5_!6V)ZnJFNxpc$_!CxljTlLw1d-^2>&YLAk9g@U5|}ldC8Ilqv}o74typ6EvCz8uN5w zWHkTC03vVjd4FIAF&+q{eqaSLK5#McI52+T2D5n>c>EYY@PgTV3<7o^_`ys81~#sb zf*>Y1y(J^(bdZz4<;f&gVkYy$SU`iNuyRA50nzykf={s{&C&3Hx_v0B;FIg1>33N7 zFkKR)E{L^&Ee2Evfyx%o<(vWwh-V8;R)dljVe6XthB$OH)m35T(QQieWb z0e=i=J{4?70I0nMW+4bghA?Cmp!pDlB$%bh5F`S#BS;jMLa_8;1PHiW3}%WvLkuj~ z7{Ua?_+UOS5F}uNc$gqeEx}NdK(Y8*9FOeAj~9rh9LPcrXU3*o8fsPNHGR98Ub-1AI#N3N)duV$`NSh z2dRYd1gVDc2C0P!>2nkahcO1JqogR2Fp&UI+Zb#bf{4+Bv9Yh93=@uH3eteZEzAuB z!&oy+1lh*~N3xr(AD}@kErd#Zw;hVgY z;#(Z46`92)@%h2uSRa2bby#+snrXDfxG#By8J6r?}} z@xX&)wyJu@X$rb_3aZ7Rb|pBGRPjN_zKZq0a{*Pt;YIl+sS2va3K}rCYu@5@EG@~% zhpjuUVs}bSEJ`g>fS7Wi$N>}xkbYe8Ep||B<(HOJ@k8b>K*NmS8M-PSh(KzJm4a$< z6(6{<4Uz>nSF5BEqi1NzK%t_FNkQWlqh^&LWH~IPnF}@sJU8J72_H}ubBh)14QMkQ zY;t^N3TR@Y9MTIchKwBC5=U5vB2kpcz`#(&1=a};63}|a%)FG;ypk#rNZ7!dbD)Nv zYO$u|E%u!J^mNd&Pwu3|;>={{{JgZx^deB7=N2oteRhk-DKRA=u_!S&wIsFZ7JFtf zxHoo-H8(pYv*;EZNPlY4Etbr@w0!XDBgh~odwF6}US?i;5vbz=9u@@mUyDEu`dh5v z>APEOkomh?>>-)CsrjWP;As%I4m($eX>-*C z*DLIjcR2X^`8xS7h}$0#J`j3X>O|-jj^HPJQZsBW^Jz4=-%&8wAbKMFibCi`7TE^h z2cq&9luWLQnl$*`F|a?tciABNB8%w zUgvcARSar5A2V@c_cCKWWybA|HvI=``+|qELCXzr^;-2Al^HadAw5)3M}@e)6KKCi zF{qsl>N_zpih!1>iFAa3DM$whTzEqXP(cIk0D-1B8W=!hJ`fo~tCS;}Jw84qKRG`B7FT?HT4@Prp+Mf@HVo0&d4(hSzWF|qDr?Zt*7J!OLP}{p`6G%1K#T+G>+2CTT2-GD5 z7oxYcAj{76ke8k7fhMdVl>qX#h9Xe+?Up7s4bT<}WE)02#H?b(+NfKc>8T|?`RVDY zMIeuZ+9~nzMe9Kh1CQ!})|Y};*n#Jxz{NSF*apq~K#F+K;0?I61BWbnkaO7N=BJeA zq}mmoWnf?c4WktAW?*3Wz|6?V_>qa7k>#rp1Eca|2Cmx-+_xF{ZZmM+X5hNZAbOWU z;x2>CV+Nkv3_SN4w4X9~-e<^rz$tK@Q|c0@)C`NuoGJ}$4}^3Z_`kCWGYWpy0j-(* z%Eimb_EmwEQU0p{C!^w52{}g9uTG5IjOt%yco?NWDlqsn1~Xm=j=Ss^e={TJ28Zlt zW~NL=whwF!&W!$yHv$50IJkWW_v62MF)=ZUeBfeW;pwQl!Yp}{g}u%7BQpbZ7zqHU C;h+ux literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/batch_compresion_test.cpython-314.pyc b/prompt_bench/__pycache__/batch_compresion_test.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acb279ebedc1e81d89ba5e550114d9b57166c8ac 
GIT binary patch literal 13486 zcmdPqMDiD_-lZp(n!X}ImkrI(0_7c$`juNrB zloIhE&Ju|rt`f;0ZgGYn9&v^offA`8UU7yXJ}_H4h+mu`NC3>12@(`%2oeIbWrKvp z8G=N>Y`Gv&afTo8nXsTi8BOAgVg6RmM8?th%*Gq zf_Np0vBE)e77+{#DO?N;G7PbbiVP+YM>EJX1TiUtv8H_K6-EYz;L@br%o2r?@_YsV zg48@mPX(vMlH?2p#{f@-to)>61&!qV+=8Oi;^Nd4g|cFW(mW)7Qf5w$LP=^xiKZSG zmv3fKQGQXeLIPAlW`16LNosM4UO{DoLQ-joLTPbou|i2kszPFMWnOYdQGQ;2X)%&@ znR&$}sfj5H`Dt94c_pbuiOD6IWvL3u8HptdATx7POEUBGiuDwNQWH}kCZ%N-6_+UZ zDS#YOtdLfepR16l;2a#LP@Y+mp^(Ux0CHS>a(-S(YFOj5LQ-jF4#B<7_c%q&VQ2b%-3iL10ACm-aG)WqZrg_3-5$hd*s;TYhl1J;;a zl$uxq3bgWkg(PsafTJWaJrNXETnYMRhWeo3(Fa)#4W0x&E=|T;+=+R4`6Y?q0Q1vi zyv3HDT9TSqrpb7VB_OdRLzD3qrz1o~YKkV~Eg_HrAm1X~UL~1PQc_TCrLUh@kg1nn zkeZj63G$+TnPC-IdO?Ybhb7K4Ha=1PS`h^oxIl6-|kg_P91;#3`lywuW?qQo49;*z4$wJ0^QSRpwl5tJo#6p~T=pO%>ivKnd=C|C}6jJha6as?$Jv^N}Lv$2^TmwTr zgIskK9797q{DVBBKulj(KNlSZ$AEw!|9~J*#}HQ?h5RA~*WeIGCm+vX50JgVp+Rnr z&aMg|6GJ?LTpeA4trR@dz?lSWSxJ6raz<*gLT+hJNoGM#DmZkKD?!OHu~a3W?x|PfbxM$S)`bCGGq?9gr4<+|=ZZ#JtSnTu2~-!XYKI2$BXAG*T-H67y1u z^@~!AONug+ON#Y#6EpKlKsi?vWLa>CW2AzgYiLN2qmPw>Uq09akmyY-O3cm7OV?9y zNi9w;%1laCfCn_FJX1)?2gNbS3t;igl46D8g8aNUQpm{!rL^3{3I!trh4TEOlwt)?sTa$| z<>=<->Kx+gq7dL85bEOy$}k|?!ZY#}z=fPbaei)UNk(Q~x{gA5M!rH$KBy8w63j0G zi5I6TB&B8~mSyG_fr^Tv)O1ixrKZGkarwGBdpP=e2K$1{^~f&=g-v;WQFgJFLSAaQ zLRu*}x9KS4Cg!CimZXBRQGP~dQYI+-=qM!Srsk!9avG@kfoG)RlEjiyklOU3#Jm!P zf};HNqQqPsh2oOZluA%tm77^yoC!)NsU^vJv0PjM{yv`0kqWSs%oXhePoL<;P)a^1 zohX!KWEMjTIcQ4P0To+$3b~ni`9+}cE!I;AElyQ{#AZ%nUV3R_I;arL2Sq*Dh~)f~ z)MAkB!I8dBAP49N1o=C=x`YNf`Y8DO`9xZAMMtIODzHgcR>!Qc1tZT)~iYl1{e96Rglb^mY)ehKO;XkRlg`RIU_SwKPA5e+@#Sjs4U6I&(kj`%Fit*iBC$+ zOU}>-wAkHuj9tI<3U&avj0?sg|0AZM&2s(V4G2DK625hbhW3<@AzyJwR?_&%M@(jLA zoD6adL0lkc!VED>o*|4$nL&X;n*rvIbS98HGMVHVWEteaFoa2&!J1i?A(K&)yUHU7 zoU;^43qZL(0i5R&6f{7!SbS!R4!GqB%jcR3;G9>ZZZ5~EiFmY zwa{d`#h#a14yxpCair!Y=YxurTRg}G#w~WJ`L~!dQ>s{s^2>E^@sy<&fhvi3P&s{z zQP)qC>lSlzaoH__w9M3;l=$Mzs?_+L%-qb9Tg=6k#kbgV6DvSsw^%@p@mpLjnaL%f zo=|GhE#~ypl3T2xc1FQ1=A6{LTWpC11*v%{np{PoY*GYD>$g}zPAI;`mXVm3l9LK5 
zVG>h7dW-VQi^Lfi7>Xo7gcPVG;w;U}EG$ip&rB%>X;o8GfPx}Ws=mbq>bQV=3dKd* zuu_5n++Tmf!hW4a;u4F*9O3!WGo`PvsNPUkZE(H8DcCRDDLbQRg?oqWbxxBjoF+Ti z?g)xZmzyNFfbFuNdI#HWVaX2X8#40q-DkQl@VhKyd|k%!f{f*cqU+KQ7o{C8OFMP= z+!vO=DIjq}Omceq#Pk`Nm&G)$iv?T|3s~X2-hHL}2A9>o5dMymBhDvS4|^T3xnLX6 z;eJC}Zoc_U^96R7rH!sjTU?N~*pSrWbKl%~hWQN51%?Y^7WiG1Fg_vP;dz5cu-~WC zXNKklNuw(~#&`IICYW|t%?Q25uXIyD^rJ9?h}3sh1}@VN5)3?o4L;u`8MsWZahgE6 zzlzNm7#I#R8N2AS9yF71(PurxF9{+wLF~g!YA*V$hc&rCq=|-$KI;)h77(e=?aIK3 zQlo%MLU4_u04{oQ)+kK+Ov((JEJYlkNMkKa%qdO1#avucRKx>{N>G`l$yme(D#O8* z1UUA<#OTfV5lqey{s5*I2t)Wqpb`V@JV=gXU;vHLFf%Z)eog^v zMk-XHMK4Sth*o3(70Y1^unZdj(hZV8!a?w&081eQsxVoqYQ))(TI$8?#%kYU zjK9TRng`O5qRD)VsR$HsrAbA~3=9mKj77SjpaF%M8aT{u@fIhhrN)D*RFIA$P(cn3 zJxy=`JmKc;H|sLHAf&pWc!ld_Zlmkm78kiKb_ieQw!O~nb%ERKfaC=hFGN6sOa^%$ z6mQ^o)&QG>JD!7Bf|$Uq2NqE40TSPwaQ_wQf#W|nKP5Hi7AGj1$AbzVa4NXPl9HKR zQbZ^%#DY8wam9BAUPiW$AYw-F2N1PF7)Z4Yt$~#uUVB4h=GWCOsy7CVfya@Kvce zfyQw_jbYf>6}V}mP?TC&np#`}?sn)Ef%4=nHc&4jKjjunfPZkvE#}gqoL?f?Mo(_B zB;}`67MX(rlp{B_xHvIAwfGiGQ9h`|EUqjrNzJ{*4joUr#Zp?FTBOMcEuX`v!#E;tdUU4T=wO^$qZG3~>c#LrrF|Yrw`AS%88Z6o6`Kuv7*fmx_lv zzsQ||fdL#Zk>CLRz{bESG9mH?hrk528yvhB_*HLk@ZI1Mo)CJ2N9=~6_zZ~!mTPh^ z2-twwVhbeK_+Hm>xTxcBBJ+Zd!)2X_3j&cJSeYgHKCrS0a(&?B=3)E5z|G1AiAYfJ zf-op1L9H)vswBQW3rUeINNrhAZCG5z1#Z5AQeZ4JHTvb}r4meyxuBp?1pE3s11qEa zM-VX~?3A2fygwxU{Zw zYF_8myu_*bs~A-4&2eOM)L=cR!R=^?TBnkos@alrGE?(Pe(`|DLo4*0!90lPL7oMt zYj6onFjed#g7b}jEXaOK7gndE?{ayFjx>XS%cbf;AAZXZfAutVo%nvgodT_ z1L`#4O~^b9K}<$WLCj%{*g9ZgOhGKh%=(PrYKSq2-5hFG0do)s2qF?{1T(hI8K}+# zWgdBkFy;Vm5DOK<;Tt9;~fJ$3w-i3$}aGz-4K?X;WFQIrssmP z3;IqMq@6Dax!e$vo)LaQQ1t^lgNXcR1{M*n4h~Q*ll;KLz{-1_MfM_#>=hRI8{ESE zww<=!_A^Ybaw}Y5QTWc!z$Mu)|Em}jv9oj?%~%hbaXYc0=0IXwJD_nAmLRquc5pS% z0j^shArcO%mU%Pt3Q9}BevZ#f0q4A1+$rFZ(t?uA{5(j8v;>79p;k{LC__TpaNikt z7^Odgh#4{;K-7k0Fm-|%OkD^F0ZT$MCN$}T(px zFh6fFNM<$_$!ZRTj?R=_Al& z32g2NBi)zgX03I&Okpq`yx6$fODD7CmK4HV)Wsd*^{`I&hoMRp*rAZQu} zGTRhio|%`DUw(_($Ry(yM{a6KA}9wGrGrv2BP=n;gQ6kC9`K9yS9ez5;g-6T@S%LfmqT#naEtAKWE^wJo5jfbp{dG!@_*0|do5q$wQ4qtB+y 
zpvenKZW8C!A%SYO-_M|O>oT(ElRZ&U{R~C84GT`R+%8o z29HakxDw(xXrT-4mMCPVfV6@dXH_-`t(kcxXdZ-Uh0Hd9S}ma7OtAuJ5}`P?1mr=^ zoc#3ooYb<^oGLwpLAc!Dlb@~tRt&No6iG$ipoGW^NuW5p#uGrP(GMJz-x=Z<8O=X} z$s2s$ADBVh2Lh=dSV4>rTns!8j32ncY#s(455^C?U^X9vfZYdvFjIhmjq9TzhzU+z z$;cgG(8w6L^k`uxW{fn91=Mndl@js{F>tej;3JnvU1%QANEnJL_?RVVR1;LUfXa?^ zNsziA)&jN|P%8-}#1H@~t-&k=p~w&hZ>MH3hB4r)#^GkmfX!wLV*rmbl4b)V)^S%* zyO;wh3_!66c4Po(qyo%B5J8;b28gCf3@E(8lHtrI&|X6UdyFVph=GA2j2+g13S+hq zVqi#71i3y2VFPHO45k=L2XSH7kFZsbp@2h=p@1`vp@1unp@2J#U4X%@nt`F5iGiW$ zYz(Nm05&U(7iN0_Ul_8fd@ykxP{IYPWnd`a3gbqy8Lk&a4R?$*R5?Q!56qMT{xFUJ zS(rG2j**A4gSf-^LGcx?2lE9?uRKE-Cjq@YP`$dCdcA8QeVZ`GAYL6%TLEfefj}5z z5Fd(&U<{}O2$l~M2;+domOKNJxL_D3k~kuku%!e3Fae~{f{$v4afJyYiF3mI8OCh_ ztzYFCf&_vD!&pJ#q0d+#6a$(M0lN$P5}`0;6>4CGD1;(IkPyt4AYoWK#?oLHMoM?s zd?W%hMV=uB)QADw5+)ob01J}>5dtQNhKaz`$}uA%z99 z>qLX(kz5B)OITeePQY~vVdA)47o-S+Fn`K31Sy3v1t}xh3@^okRDx84)PmGwKm(eP zFcg5fJxC)$BuFy?EewLR!nlI8!^HI2f^>pZgLF~!3L@zh4bqH2b77EH70X3PxA^}EV3WZQ)2$P6o3etz=6PODKrXYhbNkn`G zDH7~RE{1j_4XZqGt%(CKzCvCPr>%gz98ME59}Q};KxT-l^i?$yi<3(-b5k{o6;w6A z4G~bs56puwtQ1r=pyQ01#h{)fSWywEEW5?$?BgHo>f-1bAL8%r>UWC|sTb+(8d+uF zmzr6Uky@mHs!jp4LI71MXi-FQYDpD8Xl)#5;YeD3X9Us8OF1H8T= zK0muk!Y4lwG>N64ngU*f09{(AT3n^(pO;gK5Kb&h%*;tl%1KqwfR0+GrYNYUXjbun z`Us#kU8yNnR#kk+!DMAsC03kT5}yd0f-X)?(F1p^t0X|;N%6?4p)!`?MfoME3aZ5l z8ZbBMfSi$>kyum=S!IVq?iR0OX-P(YQDzmm3tz?Vl$uzSTBHE#qcAWWC~^P=K}ITQ zF0A+#J1DO5OG~O`AcF~@g#*Q@B?^wn%OEwYgdnn@1!f>gD+Sf!DmjQ4Oox+#2C_HM z^*Dj`2!MNNAT8iNj)G$qd?8*cEaWSym=rW_F=|!`Ll-kduB0sG~gC%Zgxs$ z(JeNR{?wvdESY&}`QQo4BG5?oE%x%nqP)z!^de9r7d(ekqzdW`fX3OugOj&d!9#qv z*dSwkx7b56b5rw6OTa@b;0`Ecw1ErUH-gNufCg-ef3a7u@0og;rubc-$1iNrG|&5Md7q-dmhSiRBO`WS$n(5WU5Z zqzp9UbBi53PnDT+iwj~js8U!Ve1PximCxYS;!b}1&*}-uYpLh$( zNGi$!d4M+;+O-FpQVbfPQv(47Yyi@GX92B2Dq6_EzyR({)g^8xuvgj%UL@aJ|A}bB9N4Ld9hs^#<2F>|7nD%~cazudqwr;o$4%>*Tv2 zZhu7hK=e-%xo$)A4}mW$CCZ9MQ;fc01H}NM6u%J`j3YI`SGv)JI+hCG`&i3_>y&uXol?g+iC?|hxpA&5R-f%Ipu;;>Sductmb@|XW(*W{Gh_XDKJ6w1_%EX 
z;R&IWrEYNWgC^c$Ztw_C37#P}IsOKZ^o&T*fD`)ymbtt)cqA`KYlBpTf8b>o;`$)K zz$pOY#^Kg5!xE$bs$N_eJW8d(AS%B=?y87>gYOL%!RstC7g=QHgv^hc6}5t6hu{@i zYf$oqrDnAiyw_OFzWXzR9DJSADP4|8Cj zd)8-EX3%7Yj7||hdh!u8dIIWcH!y$}7Knh>5r}kzfGNlj2yzn-RIr1GK&-$H0o5ZA zX+q1y5KX}#kTaR8ghD_=WeTZzB}Jgs(wTWBdVZSBMeU#xjJc>Z4_q37XKBE$fRt0r z$vK%tOBom#z^(>`Sp&lp7S49hCeNOfHh-|nTO9H6pyDh(UQ_87dwhIKesX;LEw1?Z zv{KND^7!~7P-qr`LJ~Z@S=0(DWx#IMK=q!`qbfmDm|8WE}H1C`|9a6%724x8Nk zl+v73yP}N@44_p&48?~T7#Kbox=TZ3e#E44k(axb8BD z-er)u%OLZZf#)^@&r=4^2b==eIi)UfO3kpi%&F4A_CQFtf&YepOat!?0pSMT4=gf* z@r(_;A8eQfg&KIj@(3^repLglTKUSw%gFXsft6AIs{kjX;#UbCM(K|V4E~J4j2D9A zF8jsb%*eUHA^Vw`DU*@y0~>=gV*uj~ukafVZr{P?~1WAcA1WAKIm8f*)A6SRt(_KUX19!8tfgp**uBLm^Qi z0i+{7IX|x?HLoN=Avr&%G&fHnF)u}-I5iJsMtMFL)RvNrqWseI421;!GDCfkOY{*g zDF$iHOD-)cO3f?DsZ@B>FhwChFBM`7$Q?zA-@5lv-SzngUh=@q~hF zVseH;QEFjnYH^8ru|h#ner`dDLT0f-abj6&ib6>~S4w7awnAb~eqMTUW=bm9yU=hd zN-ZwP&nr$<$V^cv&Iijt^cRD@29qvUNKVXCNJ`~O&MzuT%}FdtO;G@mMS5JCjJLQG z^YZdb5LFEgy-NiQhTHPJKF&CSfqOrJFq1WwwQcCj%qMCO+&Bo?JABr4>jre_xCB!ZJk zVqRiSWpRl@YDEb+<>aL+6y)b*CRZvHmnIdLB<3ZjLehY9eqK7r9hv!gi8)B2&czj( zUs|M)m7fF(>5_Z}n1dA(A*wR-O7ayF6;e|3ic@tI@={AniV|}aic5-0lS@jAQd1O) zOLKD*iz*dLG7?J^k`oI+qQwfSWvNA#3MrYzC7F52B``;U5_f(cII(2rCFhi;WagzS z6sIQV=cRy*NKH#iO)deYf;)0#igs@>F(zrGoeXEAwNwa zwF2zZbcGyHvaeLgE6q(xEdr<2R2^^=QE&_m@$e7wi~=!zUHx2i6dVHrg8T!5JRL(^brkZ86kLNt9G!eTgFQg@28RZ@ zIXb&4fJ_YW2y%6F3AR%3Oao^guw^CrrO6qo#R|ElIVG6|IjP{#Nv;GX!^C18g`7-K z!UJX65^z3IC{8U%EJ`d%&8gH=aLHH5%P#>HFFAs#LT>6O^{{5A&!v>ey*V*L5@CF3V!)u4?vnI**v;1VXk2%M-ga}_d6z>4&^xcoyy0zyL+ z-28)l9Yes8R*;hjO0I}Z0y0wv93o!9{(e?mT>b&BL5?AwVXg|0BphtT73~L3G@v30 z6tekg2q%G~BQd$8G%+Wq5^OD`M1|(3g47~~oXosb9fjP)3I!trh4TEOlwyU{#N>=v zE-ptmH&^EnR~Ll<|A0^*M^J_V*%qFWuaFK3Duv?w+|-hc%)E3Rh4PGig`E82)M8}8 z{34KeajHU6YDQvNW_}Tql8?wp#U+U)r69HGMTvPO zpxPk4C^1(@p}3?pr4m$1%|KbYS)t6lLZn z7G>sCDkLftr55MsltO|eCABy+Jr7(Hf%9!*d16s&UTSf%LP~0K4k(j>8Ux@OJue@r z*nkwE_OV=CKCXW5Asz}LjzR9OAyx{8rUs81CL5W;l152>Nn(zkf>WwOQE5p=4#?!h 
zr2Ntnh2+wb5@^22E6yybgfulkl>(^7135vjDm55fT2SOxf~uO#JV@n{nV(mz$HnF3>Eoji;u;ZR#Z?W;C9tMRZ8E4?2g0Cc6R2^@ z%)r3-S%m@AI1OWnW3p&rU`SyGHLd~~GZ=#ygP4MtgII!CgV^+$l^HbIUotT;Fx=uu zE-s5NNG!=H0vU0OH95aDujCeUW?soHwxZOM(xSX7QBXk^T~b<*lNw!IQUpq3MX|9f z89=prT7FS(Vo5Q`8A`V-^)vEwQ}v57lQS|?^;7ao(lT>Wi}edCOEU8F^daqy_@val zg zrY`6?gGE1sLLeFB9S{SA*%=rZm>C#YKesV~f+Geb2F3x%s=y))42ldfEKnvx04Nk- z94HNI)ygyIF%&R`F$8czC6Fjc@R~zSw`PoIEM+QXj)%8t3z(7I0PbTTX=VVqA&ePj zPXS99ivWY$F$RVL)-cupUR0GJUJwgRtu>=MNPvNXAsrmm@(e+Y0sK%QBnsBHE?^5| zL)GDJ#=wBX7FH~_Xn+!wJT4tdRy4wx!zzAWGf z> zZZTA`mX@UHT4*xeV$Vx02i0x2I8yVH^FbxaEgs}@;ubs9{98LR(!9*V($x6OlwwdeQBzZZf+A4rxy6-} zpO^yfD-;)jy1U>~f&ttif5O6kokijji^Lq^`O-6`udt}zP*!bly}>EiFWV_QqiBVD zhwODulPjDiJJ{|BicFWAB)5R=vY>hg+ihXV4(A&(^7Gwix-am%EMt6K#`1!U<%Xi` z(he7;9WF~db@<#DmcJ<=aYIaUdiuol8JU;GG_H#UTo4Ob;k@2`rTYe#)xHq^j*=tJ zCs+@A9k97z8_?l?Lt1XW`AqW#c9*4%u1i~7kha*6)ZugA+QG6gGaF6r_*PK<^@TkD?G+`_=P5zc2~^^y~eL}Q$X~iFoTHHcUA^2(+?61Jc12A z-z6EiOs{d8K)Jt)%@`OM4l)_L=(8R)lW@^zJ;W~wA~iwm!%S)}`mBdFxj>|ehKoMy z5k(ddsn6}oz==|kfJ#DeMWO&MdU01I`b^3Unyf{jNV>%dieGT&?G|%!Nl_6W0|P@9 z7r5F;$xJSZEdrHDnv6vPpfVj?SAb&^T+V}uD3F8{*v9V+tc+|QLBx#Y4^MflFY5#0<+NxfeK0ZgBEX z5St;n#P_<2#YGj11DO|8EH0}AT;K@&z{sG zi8-aIMf#w;3yN`qv77|*m>f8=zB4c}dNY0mlQV=rfawLo5I(|rAg6&aDBi%;c?wuF zB95_F=b)4j#sDj(vBi22yaK>dwSZbjELEDmiP@mh1W?fi>VzkzrGg{8v^Z5Eu~;Ed z0o1t3OUzBxs}hKgbJUGW)U7hmwbYB(jn%%z7=MesG!G=3qRD)VsVMapb8%@>5vXyd z$yj6w3I|ZoLnD~CI590X9%f#VJp%&+IH)wizJ0>Y+i%uoc0ovWLGcRL%iKoSxh*bo zTkH_N%x!y}+v@_i*8#~3EMACE1DOm8HBg*@voa)|;LgfHtU)Y6Y(XqR>_IF+96_8x zTtVFW?8*$9JdlWHOU}tm%`2(m0S(Ml=sAOVMc^_U6#2JUa}twMbHM5L7He*PN@~t6 z?)=h{g3=O1%Mwy1$bl=h=%oDo936$UoczQRaBEVbM!_#XFO_gPkq!#E0C2E;XRu~e z`v@X#@JM`Mwq{iOz{bEQ_<A&$g(?G5LyawnF^naMDU1!dlnG)%G$-|03fN=d zP09dJi3Ktr2_uF*3pj#U!x)3u!kECOB9&XnCI+#`fa+DS2?&>f+@Q~1z$wiT#(=HC z7z6SRSPu_F5Qh;{7<&*Wtg%|a6~+|+s$9YH2m+L9KsL4z`ziBGl&O7xksKM2Gk(|D-F;EQz%4` zM5JQmTN6;~VPF6aV@pQfGiC-g5o1JP3IoJoECkKQkTMU{a4g^eRfwi(Rq|{OvmO?S0f){LB2oEgo!6CskKp`Pad`M)% 
zx*JkeMZu|gDWG8*j9C)sB!U8HR-*va6(|M|Ac3Y6v_a!6;Hd)etO;lUpjbhpxHLIA zwYXSEAr(AYnGz2flLim{f`|P;4R%dEt|Dhp8=eE!RJp}nl9~%1vn?%3y~R?TnwkQs zHk?4pxHI!WRYY<|Vo`CGAjSj=Xv!d~I6v_u0;;+cJVHVO6u^qWwYX}rzG|^TVp>URk%Fq8 zQChLzEq17@t0dhLGjmc?tQ0U56@jwTEpFIsNqlC?E!I*{d+-(qOsq=EB|k3}rZ+Ri zR<#&3G7q*RL{l8xc)P_7YVv``H$#J5Z?Ocsy1LwADap)Dy~UZEpO;^fpO=|j1j-S& zSPJrsOK$P|2e|q`L8G!L2voO&ItpqkAOP+5DJVc1!Td#~dGYyqsqxUXTa*bJ^8jg) z0}t_hkzx>#n~`~$U!%d}0SEsC(Hk5>6FhEkh)$`#!NE6$_+~5$n&Y^gbL-7iS@(q6R{@Tvk?)navFDeZD!u@$&d6)U* z7KmQuQ)_U)AtW|kW1_|klgWAw-Zx~_J8UnqNZwFTzrZbXkwx}~nAG%)i5WA>FN>)+ zxOJE}`rnY0?XbGYB6fp?|2m7*MHZmxYuTgkBZW z>R^AsBRRu#zTHf_>(ZtdrA=?}2u*OE?l;lzx`_Hk5%n89vNOs*u(L>Wb+C1mck+MW zV&D|H&LMY^LvBvV{Hj@1E7W$Bo(Q}m?|+3O;0_1hbq38YH>6dTB(G3ioOQv-`9SyuDPM?fH#mfeu*+ma>Vfnt z9Db;Fec)zgRrytH0qPrzTDizG9FZ6G6k|SO;w0qG!+e~L(Nm21I1dkqlmxS-91T4M znNMJJ;j)-z@6J-z2xGuDj`U#Rv$D9l9(A^RGL?Oi^bE=%^%z^g|!etZ7pKk z-E2YZuoia^PY`bqAGqDk4{mpZ2T-bP;nStjpb1nRg_6pG)FM59a3qW0 z$Vkm8uvK@;%*iPR&t`*$bk#LuZ*f7K9G{X|R253dD2Q6!l*}TyiQ%A$LeOv(xXe%} zNG;NZ=0F9|OpUr`EI8j+nIOytj}fD|65=?2$dr99sDq%8nF7)Z8Xc;#L1@j)D?#%h zL@Q+G2Q-lg8fhp7IYXg1wFKlr?&N&vG<$Mol@np3oMFmK6f*O4({jMGs8BBzgAC-% z$xn~ZNi9pwsnSCjhs#qw`RNK^#USs1V*VBjs4GLTFT4~~bb)&D4GiBI;u%3h9w71t zpZ5o55aWSB>IYU3;{z81j|1ZeZZMmNfyaaK1234(#~>8K_<UA$=Nn?}LGXK@>J(a*Pe!rvbNdKm_)FbQmkBCk^W-$TP%1Teb`gLGZz9r2aDx zsNIF43O-DYGVCk~QWwNZxMvi`4r_g7ForP%!23NQ7eg?t4+0;6l>wX07RCS>SSHk- zMzMi0h+PNCjslJ_#vl$95za6UY`v=>PEem25l&&8#Hd2_$*}dlg1EyCU?Ujv3^Aa= zFR+)xnN6VMSp{4%p!fufhH=3{Dva4eh=CzR5flzcHi(0jp%6hl*!3f9)nh2&4&xGF za0_B!C}(0|C^{Phs`A06guz?JJPbj+Moa~KVcY?r5nQk=f(YXYVvmu9@xypw?hNlk z4q0P(N|9&4X%0vD%`iS(y1YXe7(hexNPfoBrw!sm@-sKgEnzGOKL_y#352nMQjb1k z0e>7rfj}HXfuJ5kflv%+$fp~#f z0Z)NM3}|!+tUXK;<{tqDw^Rm(0;w>`Fex2Sa~dqqz);Q%b%{inRDdB=5Q&0?E?Av( zflPsHm^3Wcr*MN(dze5RqYf`ZOM!qdXP6+Gm|&O?wp1D<8pabO7A6)X9wr+k5hfEP z873Vh6=MuDEldLDetq~9mpp@aI>g68(jbVGhS2!LrkpTgl<^U{Aek_xAX%go3(w_2 za$y2N@?nBO3Sn|VieW54N?}6!>;>{+j6urCIix@#Od-G&<_iQJ1DY)Wa|1waBQOg= 
zC^Ce}$1w$|z)~K}@A3=;a+hkDK$u{dT$lpV7>KkoLn)IyLn&*41SDN#GT@&w^H~6y zFe9L9HAI!BT9q%J^?}fpdK#b!deHK&)D+N2ejb9W37NnLH7g)Xh{XCQqU@r;?$BVe$eVf&`O)M{L;J>D}`HJ2`}G2O3gnnrxGEYSeBTXlbDo~ zs-OXF5v8UmsHSLEvFd?(;#C^KrAfJ&-~|vMO;9z(3ZR8;ux3?qrLAhpEndgcl8pSK z%qs92jVgAh)Wo9HA_dU+Ed#@WA_tH|Gg3jbfW^1iL1CR=S^`>On44G;4_X; zp!tzoY>;94TkIj3xvBZ3B}JgY%v)^5;1~q=o{K<3YoN(kkhdXy{CJRd&{#rI0s{ks zrXsitQj`M{1&!>0gA*KDMS0*LODxEY&rYo@3Iy@^K!F3vWT5WGEdk`D2@)v+71*~} zOA?E-iy?6f8nV2_0iXG(1X)xEBI-c~vVsP0DsFLsLKig9QB(pRcL1eq$ZAta5&+GY zfyW(g@uii5R#(QC-4Jr=V86pBe4S6>5}(2X@ymRg7dSL;a0_4O zmcGa>eVtqO61VIP5#0wd(KvaH#&Q(#p2EQBPiVOU%iW^;E5xK!4be%=w zB8$WVkqs;tSR}5nnBHI!fea!nFxjATfko;Hi}@4S;DOc(*DEYGcX-4mR9xm!Z*aZC z&edVsTs6V<3cKVT4!(ZAPQDA`_D6&dgdUbU5qgCq_<^25gX?WYwTm3G9qf0+loz;P z7Sp*Ps=Ffeg0L}YNQAear;}$w`iz<@y7mWnF6;W9sJrT)aX~cm3P;ur4w!7+ikb@n z5m#*^E{H~6u#?T+*epa9`MU8P+y_GLh^!w@dZAU z2KO5Rf(@QGc=;OKZb(QqxZV*I1x@W=7F2BTy2HuWFViJ6BYjQj`r1{sJ3=q(J74E? zxytEsg6&E`(hU*$@61erjBH;SK;#2X!H;4L)((u{M7gE7K8k>t7X{2Fu+I=(z`4Tu zgD?ZHXup4_{~ZDO8v92D zK@MKsk3t}mK8P}KihpL{mg4#%!62@@!0wuuQHR?NEsGsuSG622@~C&XPY9jieM84+ zz3)oj)&3WGG&|fUgwM#n&o6dEKypUlRKAIv4+O+!C{E>`zy@6{@IXX%M)?g9=^3Fn zL=+cT-w;t@PLg-|v8yvzDB5!aAOi;VQAvS~O28ZZ`>Khz_ z6HGtwbAU$EJ_s?0$SvTzDy-MwbAv_TI*as07U?-I^Sx$yEhyhmctytY3X2tVWaG_xnozQW#^s(hUrOM$ez}pUuxq;yc3un7$lV?v#n?G3P zEspqjP+1lquc>y6Jw84qKRG`B7FT>cq#TWpF9L;T5vV%{9&s*e0tJ&ThyYEqfvccf zLhu2Z5f zP`{9(P>{33Km@3PTm)(nLuy%2%7av{nIKtkV1fb;Jw!Qda`RJ4b5iY!Rx>bw)*vwy zpJiZR_`uA_$oP?ooss3M5Cfy~V+O9<4BWRF_--?B-UhSpGuYo{5WUMFahE~nF$2$S z2A;bN<_|apu5(IV;*^?UahX%4f$f2iZUg@f0l5a=8v?=&yf^p+8@O++zypd@s$iVOz#DQU62!ru+4kH6Y149r)2}2x15Thrf2U7`?Jc!NY z$y~x5BwxZ3%Ob!K%23Lr$56`TX28Huz!1wEzzxxlOvW+?Kutv!4P!82gz#gT1Na~; zWYUBQ!V6$YoFPaC%odI1j^&Bvjpd8wj}?d& zj1`I%junaJiWQCJH0CW43z8LQ2$BP<6c1ujW+-Kn2m2|LK~tg1$|t|LI48BZSU0mc zL!l(KqC_D%KewPLwYWGlKTjbozevG3KQA51Ps~wB%FM|DtJBkDyv3cEmzQ6XSdy8a zSL~Mz3R@5c1tJRr0|PSy1LJ1}22h-aF~l)hv@kHFFf%a7Fa$DYFa|LOF$FOPG3hfY zGpuBJ2@3gJEFk-DF&CE<-C`?BEh#O^Tgjlwc#AD9zbH4cq!?tj(k*BGjQreG{i4j| 
zjLcO1l>Cyk%$(F>{esGpjQl+Pf};G~f|B^8)V$;jeUMw@OY*Z*^Wu>_rB_gSizhxF z>h#q3_#z$#1_oya28LoOuw%b7urLaI1Q83EKY*we%wXPUkjKEj1TjFE72-?Q&u(Ci zF(5H87GQ7_V_+y}Vqho|3S$gl1`DDP;R>+SR>0sb0ZD0L49X0ej8#TKsRcQS$*Bs7 zd6f#uiN%?D=?eL23dJRfC8-K|iMgr83gww4844wtB{``e1*v+POt<)gLmWd~;{zN+ zLR^FVf^RVwmnLa47qKxgFx+AYH#M)M800E71%)C|{<$TZ2MXkz z%&OFQuto7;i;9F{L8Aor;d2GOiySf?>^Jzu`l~yuXINk6*X?5Y$jqRq|A3pn!Mg~Q z>%c(-iY$XIk1i9iW<+!aFoC2|u_6N~iHb9XF_?e?3RHM8XfjtB1f}NYm!*Po zNp51XLRwLNu0mdEZfa3xvO-Blera)HUP`e-acV(gQDRAcQL$c?h(=6GwV{r2t%kjo zPE1O*ajmAdCOC{>7J_^&49YA{V0VQv1i(TMrhRHVqjzyOX~h~ZDTdHc<}%q|FNUepf15E*-!JMIEY z9N2Jh;K70j6nI3JG{hw^e~=du3Ct6mz&sfj2!j%sC*ulXPy+L0M7RYMFCfegD-VLe z;X_OU3u8c11@a5X93&jXYz`GwX3%7*ihw3R$6#kqPlb&9qRguNypqHm-J;T*RE3<( zywqZaq|(fs5=hD{%}YrwDo)NXN-fq=NJ%Wt0MV(1rHMJkItuwk3W>!fsYRK^*`Q<^ z7gMYi9Ur4>8>1ClZLCupQ>>*@1d0n#vPsKHOfS}CE|O@nJJv(x4Zx5ot*4@Ah)FqT6= zu?#EmnHZHpk*W-dV`WGbDVhHzRHWyn=w{|Y3-m+QM|zAGPmXh7EMIZ&@hVPP@_m26e-#>ia}ANy`UJ(L%4%_QREGe zqLPfF)KuO4BHi5lB2aCcSDcz$T9R3ossIo2l>G8M1#sLXwg?0z}`oV0nHg<3qme)Yh7T`Lin79F_4TL1I&!- z9~qb#)n_Pw08tB+!90XJFk&DHoO(#hBn8M-5%x^Q9K?i@saUGKgF%f3NQqUfkeQpC znv$7VlA2Sgkd&GRin_$S6otgJlGGwt#3>|aBo-wmm!uXIE9B=Xq?V-?Rf20Sy($(3 z?U=k4xdqa#d=NX-(A*deK{>cGB6Y{V$T<0 zjEF&l0tRn!q%kPwDsMm7a1c0Ymn9Zu=9dnVVE#d-?h7}8_{ z4;0*Dheky)s12e99iI|Gj8Byo!y^ln@4;aO8a!xVcr0uV8k-XCuj#Cr;l3dBGQT!> zbV|g6exp+@$Wh0{r~%3<8Z(?hHHyXpXD|;>)RA4I1j4fotaDRRo)2$77o-+t=BE@Z zXe1=)>FFgT=qP|0dI<@d3W#PjwD$>73~E2?RU7Hl-eT0lUbzdxo6n#WP*9v&Tm%}w z0S5qj+c|iF%4Kf#3oPmgztga|nuQz(9E=hl88{dvW(0o#Q44~>JcK)7*#};kfJV3) zklMlm3~t=uY{aMt8V!WHyMV!)7nWtPjmeon%>@tX9&s!v$f;DwK^|;Jbo#(mOj3Sk zPHIs>4rq`UX|x(t-hsxkA!F`bTtT3&E2Pa_Qk0li4618D6_8a*Vo9P^g0sJGK#*&2 zu&2LYd~k?sKyU&`GQTJ#wMYk)lk*h7HBK>C4y5&`Us9Bq3Ch069boXFCa6?R%gjs6 zQ7B3+F3l;?rui*)$DG8Z#N5PNY>qjJ#o39s z*d4(GV2QUl9E-9O^NJITZ*e&%=47Vj7v*Ip-r{i1&&e-JOv%5+?VO*N2Wo>RmzLb( za7oQcEKe*-y~Xa9lV6mXl6Z^VJvF~5Ju~qZn@3`KVrJ$oR?n2gjQm?1o}j`lKePB2 zyJucXW@29AEf&xG^2A$g-Vp0Jyi@Z^N|UoIZ*ltMmu41&m9Y9IX6B{d;_yu@s?15u 
zOS#4Cn^;_&n4D2soLW*+e2c>uG<%Vrn0JfQH#09UwK%^d@fNplW^pkH6%=HGR1_EI zmlkE-V)q3N@Z}}m;_ypNDgyhO%`deqF(t8z*DtkP!6PxZpg1G5D7A_U#Pv!oDo(A0 zuzgc2GL!SGI6&f&`9;}Pe17>wB^e6NiADK2nR$s-JP^K1Vm8PDEdCjp`L{UyvvU$N z@A*mqW z6{n`&Vhu^HNG!g^5?Yd&af>}HwJ0|~ujCd-SY}arCMc@7!V`-#GV{_)^7E>A!&8e( z6yQ>v;hDwB`FX{edAHcZEAw+R^U|xh14>hiO7azgGL!RfvACBe=2r1Jf=A&K^Av&; zbMq6cL|igKNi(?wG}7aopHrHfl$ls17#a*U8R~^%O^I9V@$o77$?@^GxZ>l}N=r(M zK(j)i+6O$|d5aA^wGw=b9h8(ab5d`yr4}bA7Np+d@^ts}4{~*O40bKz2et1&Z3gf_ zHh8QPJlX~x(E$%CfP1Roo)5Uy4Q}Rvn;S)-Y7AU<7Wslq0c8)+peuMH20XB802`~% z%}*)KNwq6#WME(b6-3477#J8nFf%eT-ehoi%AoX^f%5~41s78T+s7hChET?6#>;}y z7ek^>m`*U7ZZ*+rM$-JunVBnCFU#p|VA{^Rk@vE&-2v9iLT(pBqAm(XH}HLBV__8d z$_JWO`O3w_=*b8ol|h6Sh|vDZ#muPwm4}JZ1uSC*5;Or3nqRq~IyFA6*pD5D>2Ep%29E=h_Kb0~u2*ofKgW55Z I&72Hb03D5_)Bpeg literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/build_finetune_dataset.cpython-314.pyc b/prompt_bench/__pycache__/build_finetune_dataset.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16b511842fda7ebc03aa44bf1b4e49a6f237fb65 GIT binary patch literal 6320 zcmdPqXP??5UTf*48|;uwM$JsCZiN|@w9Y$i|U z66QFDASO?i5|%iIAZAZi54I9EupEmgy9Y-J2bj&|$yve~%Mu_0F%g+mWQgT9VT6d3 z@Wh=c;SFLHX9!{wXUGvK;R|9HX9(f|v-xA$6d8gzVQRrVE)*WzoFE=!mLOhnh9EwW z&K$-PfgpZyh9ChDuS77GJ4nzXf`K80i-AFgAy!b4!31JGgFHhJlQI}<3MH~IGB7xm zX6B?QBr2q3=B4VEl;&mTr7L&^`}_GQq$HLk7N?ddq!s1oDkLSABxfiTr52W^7MCb! 
zE994!6qJ@Iq-Ex$7VB|wdF5y36)Tivq$=bWWu|B5CFa1@LJZR=t}HG|%~jA=C@oGc zQpim$E>28OE!I>h&n(FRYXsZC1vM9{DY3XXv$!NNuS6j^Ker$!wInk?uUJzdsZt@i zw74WcH$F2(p(J0SpeR43G&xlvQNcMQvBcL$x45J-CzT6k3DiHurI{s(Nja$sY57IS z-qqvMWW2?ln3tDdk_fiIPm}Q$OF&{th9=`JPDhA%YDzLF20<7Uk}M1i49pA+jGq<2 z=_ZUJj>)2hfgyz%6yAZ18H_=UK}yiH7_|sKM540@u2uDDa}ibheo4bLFFx;_;{!{ zQ{&@{co-NM#26SDilxDR_|Cw?DEtvb%wYKdqAnMahiu7cE*TWm$C$@xVoMM4Y= z3`L;AteAs=fk90H0*bgnsh1aIY&^6uC@zwNC8P#$LVBR8-r#CSPV45GBAWO#W9+I3L*vu1_1_FMmJRkhH@qbh9Z+NmH=o) z2(unU!!sXi7%M!HLo%NN149fbXh6ys0zjbxW+4bghA>um=3@(EL(6>ZVQc{)A0jk> zSwSrN3@{fKaD;JSRRcFS1{Bj^eHP^m3@MtR>=(la7GPjt2oQv^5Ofe*crhVUO+ZP6 zfq_Au!Fw_T0}j>fNbckeEOwR%PTpb|>}M1ab!TO38H#Rd6!#i_Tr zi%SwqN{i!@^HWl*6oRue3kpE39Mxh44N&2ypqip-r2tW*pjuoc3@YN-lQZ%&lT(YU z6mh7}%U6IZxy24Gg>SJ#9dnDJN(G0u)ZBuSN(HDoKTXaeQIKt%sd=TjsYQt;sYRd~ zxkw!33ki@6b9!pYEq0HPkbro9?^|p+`RVDYMYq_?6N^ACHBBB!;Uo!C4=Q4Aaf5sm zpI4fjlv-2-s@RKUKytDm0u(GoiXaxqi$%&H7PxQ%Wh7V$4=U%u1=uYCa4C<}@G8<} zU|;}i&;-}}59AcE7W8*i^;VQ^D7>y}d67r4!<(cce|pu#ss&mrk}iV_eSY!&n$DUT zzL)v6)}&wIH@zbuHl1%G-vxQc1C|$}ldngoUyM$_kWqR$y6lQT`Aq?d8^Y4lZ6?|* zU|Yd-Sy;Qn<%XEl^o)raGb&doT^7^raKA4s1vkU#faeu~Xpkl`*%^sbv!>@w%v%t- zf$6fC@pUoti(=+GI1dk zBSE;oib1h6*O}W@j`gsnBZsRT>k%U*S2@XAwjKQMuW|k=yCZ?tqDVB!G7KWAvmWC;&#%Tr?DdrYwmT4(w@ere+nq(n7eT85XEpK-7vaLPz9Q?w>We1)8lhe%TjZy z^bqa^H}p|L0pf3;{B#AdVs*_}a3kXui(h_TD#50O1E^`?3XZMs44RCIj32?|4dJK{ z%pmRu4hG>^#t)oeHXnmf4C4oWFk6H{!0v-6m?_4<#`RGg#Dug$p!FZP)`X@A*3Sye zpjJo>SP(=6K-&=@HVi8=gfW8~{7^Z06{M?|I z`7;Lb1@Q+7gmLRL`7!D<7Vv~|1c1stuxSV)22@FbxdEVh3CuzeiVR^qL4rX-FrUHV zK_1i&!(v*HFp>&3G}p0tmq1%3JV87`Jcw2afjAWjPYPp4v;u>82zB#(G@+e5(JB|n zXbPyrg^i^^yWgNOi>&-iP^As;119FBpf+>BeR{nreN~Ob;^dOd+*HkC1yv1j!H|~- zCetleup)3Dq)NgkKM~YjgxOOJ8NpF4t`Y*tq^2mSroiQ@q(Ne! z<`7av57!_Hl7whLk`B>iy2W0QSd^MqQhbXewIZ{)BtAd;7GpW03FZgxI^JR_%Fju? 
z#R+e%6@gk0Zkmj@cvACJ@V*B^GBUJLl)6Wu_Ntff`LLnR#jX;BM(H=A6_# zaJ%Ie8^p!8SaY*eGK(PXF4oMF)ZF4C4Ui_*@}kU=RB+Sl7HdjrZb7joZ;=70jbsQS zj6j4jNFfKT+PcM!6ezdYp>Dgy4s~%6D6J#<(YFNQ!2oNt-eQM&7L={j)YK4-Fh~=O zB{wlMugI5yfdO372ZEbjU!)jVd9JgFUt$rTk$J%&@hVHw6Be#^??&$n0$MA4udrC% z;g^{adzoLm!Q&20LS;$v3XR3NS6EEoYIRrCTw$@fBc!yz=CY7MgEw5Q`WoRC(U&zX zudrA>VBx>cB6g8Q>^h6gMHZO_AsbvSu*h6tvAn^@-{1}=9&n0W=ajt2DLF%V1^Wd~ z$?Kd3S2ztexZP0JUT(M0?wYdkMIQMM&l?Kb%lQ}bZ{WPFV0D2<_6EPgC4P+?0^&0a zr}9i-zbPQ`fsH{>`hrZ%jEoC1F)PG2NbktJ;2Cv6Bl?0&%m-#BalQ`s4{Qv)!u>v- zK66CoOU;s6;J!okiloC89>*IzLLlK8B3F3iZtw_CDD3s=a{nmKpke}Qm>GVNVPNHk z1dzi4^$QX4*H{w1%QA2oUgtFURSXLI!=g^?uC}a4ZMofeQAZ3wMKZWi1S+o@7_c{r z^cj^IG?~GTAaM0v1S)9>*4{aw+8b27g9Zke7)3rZFfodBgn%hXU5qFi!NZLV;1K~U zaD7jxE=H7ch%zz=b2p%73zOd&O|q^MG%AU`v&M9)u?`4%guB4bCKB*P%O`!zMRBr8Fnit|*FufdSOkEuO-_!0>^Y zk&*Eu6DuRjS3U+t<;M)%w;A~EGDzHIka^6&^MF&}I;YenPN^9dmpN4$*dB24Hn4wX z<6#v3DhC?Ev0#W}Ok%vjq4SxUDUOltBO5~kV+!L94%^SnObK9qJYzEB4G#6s%uMl& tY#-Pd92wmizk^#`Un3Zq7)3sCF|hD-R9#_~yvf4e=K7JDfkg@&gaG*sZ`=R? 
literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/compression.cpython-314.pyc b/prompt_bench/__pycache__/compression.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fade9b9b585b38cae908b918a71ea9bae1587290 GIT binary patch literal 3884 zcmdPqCa>u)`Qk7$K}!<^Vnj z3z;-wg7Cr^jG6qHyp0$bI2q&^N|_26N;ra;#TkNF#2In~N;rd9#TkOwz-+D{c5#Lv z4ltWLh)J2Dlt~_}Ba=astIEnJzqmLjwYXR}vp7ScB((r~v zTTl|8l$w{Ep^xMfy@JYHJn`{R_ol|j7x6GKFgPSAa!e0fV;$ zB=W)-lo>P`tBitD3vv>ZQxy{PDix9wi!<}m74p**ic1npQWf$Nb5n~I$}>wc6iPBn za#BGGQuQ>MZt(?&IEJ{!2RMd=xCZ$J-(oH|z3PqqKc}p}86tX#)RjKh{i{il+6$!(FMhWc0=L&ikIb=H6Z}5xtS9ey= zu)fT%+r{#cnL$zi0XKhxcM&KdgM$bZLm&^66#GRW)kT~P3=Eo#MW8$giDpKkpYx0#TmjF zOh5s}zyOLI<|>1r)ZF~CR8ZE)O)OSOE6UGR$SciFEy_$*D9Ok#El$i!DOM;>El4a% zEXgk_)~gcHh)Jn7)G@Bru(#5QNvSrj)zsDmhY`#|kgtV7Im8LEe@NU z#N4EmM7vw6#U(|V1@U>Mxk;%-@yYqQxrxPkHaYppi8;k~dN5@^pwO29J9LNWe(9ak zyJfFi`dzg2J5hWgBoU6_qbRDvfu#|sA3={afLA;sRH=}WDXJzVm6107BGY{1b}Qnk_9n?Sb|t# z5nq688ukRu7Q~83&P1qZ58@z6J!cRXN$R;tRL>K{OOp9~LHs1C7eF;1N0}K-Q3i$rM(j3*F$ReuRC|jE$AW^KN`)NcMnicz7`dugV)7KUt9W9fHKOBUVq>*5V`8hA zVhpQfH0(1qi&K+h^kRy&G-6V;qZ4(j9Cf2&?KNZcs@Q^EofTpXtAt`SG@ud&x|Xrp znlYMHObH29OzP@YY@vF=dd_;cm_vh|tC&NB6{^@`^s0??YHu;>X|hzYhUzH<>s7Iq z>M0cKfq21s3Lsvwo`NQO5vZyHmxQ-CeM5ahJbgUt<>Oh2U56B(4JAez~wTxCPGAWM%aRo%iLNB5%n1+3q&q+ ztKk#TUtqdIA3Nk?|ii`70i!yJq z`{w7Bfb8Y)OHC>Q`Vrz|lqGqH*%C?h{5Rl&0uzL z#mV`3#hH1x*uyLHb2IbOtGEM7Q;SOS6@oI8^KY@ZmnP;`@i~IqbBTEh!HK!~iB%#l znV{^MTmou_JLl(=<|bt(Rtbg%LrsQyp;(jW7JGbrN`7*D{4K8d__WfJ(jrj*0+frv zrNAvVa3ekV7CR^}XXd2dVoNPfPAo{h#pUVl=O5(i>=^7?#LvLM04^0EgGxeh zvmD%tGJv(*a`RJ4b5iY!q8S(%Ks7*dG6Ms{2WCb_#+wWdPZ^XRGjM)jvEX8AVEb6a z$PmgH&3IWb`eI1b3DXHi)2$|2%}AP`IWuzw>t#8;4NTj4H}YN|d?C)Gcj1oUT$ukJWFcyPa(v!^`6<7edMtwN| literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/container.cpython-314.pyc b/prompt_bench/__pycache__/container.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13b408d29788570e026bd455d13646e5ba899c47 GIT binary 
patch literal 4144 zcmdPq2k3no0W`-~rKbJ8uFf=d(F_bXGF$6JsGI}tTFv)}1OrFdo z%yA4sOr9(yEV0Z15VIiKprj&0ESm`#4gSd z!~te=2XTrs1aX1cJV8v#44T|k`Y!p&*{MYe$@zIDiJ5t+MG85YX{pJT$vLSCxrup+ z>8ZJ?c_j*I`9%t4K0dyBnvA!&6Z7)&OA<>m^Ye=Rs<_H>a&mQ(QuC5Csw6;ceGs9W zUyz!Ym}ymJpl4{QXOIkXA_#+A3i1Xs0|Vn{9k9p47~+^LS{N8om>C#k7y=nH7=svt zn1YytSb|uCn1a}X*n>ENIQ3bT88o?GGBGeP++xkmPf5+W#avucbc>}Rzo_IEb7o%2 zEl#j-d`f1~EwS{1()ire-29@-_|lThoXje)b8oSx<>V)p++r(AEh#O^yT#&{pO?Cl zL6h+oTUvfmZemF>$c;+3%=9zzb5r$;GLthhQ}t8wOVToPQj7HqDoZl*^Yjaf@^cGH z;=x|lhliV9LFFx;_;_g8q{hb=2{JG+@G&qj6gz|c`JF+V(TnjTn0&y^_kk6}{lLb+ z&G&&F%;aDY5dOdkW^yseDt+JvGkF-;xIXfNn4dv`mkc66n1z9XL4<*Ufti7U_477H za1w%sTMS4ZjDuJ~;TOil!w|$_#1zCC#>m4E#AU=30}4lo3hp4DFa{n5Bc>qUFoqz$ z7*?<_0|P@CGb~{R@dpX$i4?HJu)*X4IAJUV9V8gWU;;H)o*{-CCSw9+$TP(7K$#2y zpm2e4ptK@G7)y{)kWi3tkcc_RcMJ>+1*~DL0Rm73NK_nyML7dQiY6$<1c`>R1_&WZ zLYV=gPzDkOa|_771#B_mFsU$jCdyzW;7hSE)&L2ZJ_L==wjgmGDTGQy7zIfvGiXXC z$}%!A1eYWhl_(^F^B7WIEKV&d%gjqxD9K1wNY5-w%~JrUaXl_B$D;IND=q~E1&D~1 zf=6k3dS+g_TVir5L`WeevnVyWB)_OqAulmERY9XDH7Btovn*AiBp;#zlvOnKz=nWw zk(Gi+esPHchzU}Xl$nh8Tf8KUyic6(uHv3KE6< zGzIs7Pz6Yy1zBHOoT{K1zbqoVlFDpt76hksbbUB1^KB;T30tEwJb9^ z)i$pzGbJ-oFF8L~KfR#THZdosic42Fvmn_vBfq$$ib=Puio;4D?33auCf$N6Rx1kw z1A{7dU0twX6$eyOze-qF7ZmQPDG(WmO%l4g=>?^_5claKl_^#Hy1KcE6;QP~sd=|p zObjf|s?@+y3JM-jxd{ud)FOrQ%#sX9_**Hc7Kga4WWL3hoLEp&T9g`}Us_U7T5^k} zB(x*na$3F;R(q;7D?f^{q1;o!c&qjZ@=`38sN z4Gz8!nxgz{4enob7+AU6y&JtR2w3b;y~5&fhlRJr`wp*oe_U7G4EGH#J4#NNUf~UF zaJ#|Id!1e4BD=&4$1CizHv|keh;5hMD0|(|<)Wd>Wka_M2JRR5JUW=~@JY=uzRV|o zfkXZd3m3#_ofT>8b64hG*K@q6=XhDq`GE6PJ&$WFo<*Qi4W;S?6|dl`QwHpN>{X{e zqcX!v<|0rLS_F#xB3@9agGyXY#v*=Dg#@nUz+nrnq`-s?NJ1EF>~{t(MzfC~VuA^n zf;a?GgfTF1z-k^PaLt3LaRQ*t8f3*mjIf$So*@S0EU;t%D9wRc2tttoRP)DxOa}|< zL0SQ9=1^0K($5~mfn)*;xE(>737kP(K^#HcNT#r1m;!bmh`{P&9)ypBc$FD6`Kok- zOY#d867y0Nic)j)L8Wj;Dx|mvHzaZsi?R`ggkF_6NO=Kf@lgb-^KY?$@@9yeCQ}ip zDk=iiF1MJ9a;unh({8cnrIwWE7iHh#O35rv&dUjc7)|yvXwC=F* zwfi;sP4JkJwj}R@u<3!63qj#mSt1bPDGOZI=v@%DJfU(SEcz--3{>U@r{Dz3896Il 
zHfZeeyTIXnLtOd>pYT_97FM<|T<}b3a*f3l>>ToPNfD?83icDeoZ$(|8De0+eP`ff zOkw;8CTFlfXo&rwlmNq^0u0kw7y{s# z1*({VfdSg|F$6Vz2sBk#!Wgji83I7%0m48q3)V0~YMihtgIoZLaIgvlflq${V;G}4 zRB4JH5pHA)V?@|Vz(4FrnTl1JAqdnZ;H)wWPb|t)$ON|^bTd*D%PJM1byaGyf<~%d zx}Ji6PEKNOqNYM(QK~{wXXf9^wEUt}1$f@i%uCl~uHu3;V|0siGpkf}b-@nOP0r6PC@o3VO)MxV zwk^m^(Sf=o9^3>H)zyXcU2Kz!%XIScGg1>%Qj0V>iV!(h36x7gS^5@xd14XB_eIJe zAr%mz#=yX!$yB5P>R53?O-?N?2BlP3{mTb#h{S{1aA`%UsYT`t3=H5*(Fo2IPaxId zEsh9KCgl~p!6*EMla-IH!R-SN0|y^cwRnevub;1z?}E714!8ZjJAJR)_+PZ~zibnD zg(K)D2j2|=vFW@Md9Mp7UldTjETG!pd7oVXuE%6U<`oVHI5+7UNAgEj1`a`R{?p{W z#U3A@lAjzOe~T+VKCKkooQf|3dE*v4s9lqpmkth+Tb${sB|eab4o5sV4CCV=1s=GB zlLZBe7Ki|qYhb^EO9-$v;8O1vhfQvNN@-52U6CyV0|TfsEe0_@Ff%eT-efR+%AoX^ zf$uhh-~ >zq=TIHhJ-T;^11V0$1a+rW2&SG|GjD;uao_cex*i_z@66sSxA8xH_; CJFBq( literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/downloader.cpython-314.pyc b/prompt_bench/__pycache__/downloader.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d449901bda49b10cf53365f4c84e3eba068b74c GIT binary patch literal 4501 zcmdPq+pxth6$gMr~Ohy%mS5C-Gte+&!^4GcjHB@A&4L5!Y^9!w=n@*p;o zCvypN977P3Crb%S977PZC#wfr2^(0B#gpBGql5#@X7c1L;f!MlV)f+m;4a|?i?Mm~ zc<`3+$}_|=2gpEdLM9a%V);!NAtEILLG0oTK^)=?IRYhuL7d_YL0n+AP!P8`Ll6&` zEgZxv&Je^0W{bqKDKZ4X^#=(UGX)8XGXx2NWOEoxM1zFI8G=MWyb`fk{vc6{2nL1} zE(Qh}hFCF01`~+24Dt*?Ov+%aDOM%xQJS8fnV0UCn4GGRo1c=JqmYtco|luKn37tg zr^$GWJ25XWza+6FGe57`Zzba`mVm^P3{A#coQ@Fj)D%s|TY|-Ti3Pi?W^b^U^ZYlR^FlVURo67#J9s85kHptAJx6j3JK6qJ@DWg&E}9K*kKl zAjTl3Am$*ZAeJCjeP(5bm25AW7#J9Cu@t2i88lf`}Q$A3)R! 
z2(`fkEchAZ&t#CJK@1QEB|T;a2G-ANVEr*5$AQEd!WdzRB8)+Y8!QASlo>P`tIUJI zep4vPNL9$mPfpBHNXaZpO)kkVs#GXQEXhzv%P&$$R6veMP#EbIfvnSHDgv2T#LK|I zPz-X5np%+{C>#aAM#V#d5M)e|D9m>t`38n3-24+byX|L$T;*20z@k_r%)r1vSrlrr z6@juP#6dg^3=Cjb74d)EGTf4z-|SHJD3m$Nx%Z1nbGhg z12dx`DDasX4MBm=%xDM-d}c;Nh;u<61_vYq0|O{&fCIk@E%3t`O`z%un8KI>K%oZH z1Epa($C^ z?zX+oZE%6xU{3G?k>xTAWfsd_mNmG*VgLzHP^5q`$cH3n;UZ2@oD+&;kOScvOpsCR zBZDBL*bLzhAZi7K+8_cJgg6NrcOVxrK%K<;`4iZ4hzy1t$1$J~0xL&|!sAsE6t6L$ zv;tOvElz_N!5fdQNn)4(a|3m1cs(gKysf<_HqPuRt0 zuv}-?xWuk;hl8)5yOaBZh~5USD;(B0_$4oJNIqfVYWHsRz968xqT~vT!yOjU8v-JC zghVG~P1gU)%FN34g@b{W8=^#gg~$~ay&K%37g$6g$r+TpKp2!PK*s5F9>t$z7=sRYYbLr~c&1-9rr12dyOsBG1rA^!oyUJ(N3 zAzTP@9SDPpKX9QM2G$tH5C9TI!vYL$5)2Flj7S+zAOPeyG@CH>z{w!T5X7j= zpvhEa36K2LvecqVNRm;=$t*5OO;O0q1E*MsuXPkk@(V!8JSo4VBtKWrPm}o;8#wV6 zgQFB&<7hG!f$}@J3T(9)6zo(K|ia*99&Bsfsyz#$Ahk%(zW~%Q)5u88DX>)sNr3zw;_vGN%8qH7>EO($t{Dr;O6l=Asb#4- zReA`cic5;Hn&gw8t^ii7t{DqXLq!_I6y4^aBm`;Od}rWiRQ?DeZt!@0VCH93e!y@0 zft8<8`2!mR8`np6enw?*sG&5FKxr17oT3;(ZSxp-0ttYYiEsf>>z*MD+)6~0mthP6 zpahAmh=Bp#?ghm;NGe?hRK^D}hcWOl7%>$v#ej+mkOV^j2be-36dA(cWpf5&7()QO z%tj~+Vu6+3){OCtrA(#F@o-xUm}5Yt5Lj^lsHp>HAqYi=FlLzU0#Mwu2C*X57NE8m zSaA@WK0^eH6C(>l0c#AXEd~}1V}8?zW`O&X3FOaACV2)~26-^_(XeKg zWyoaIWUn%YXQ{-T97u*LM$1v))C_9AL(`I8mA;dQQ2^HqRtmS6ijw>wg%sB< zc2H5CnU{WxJ1Ma^GZ~`q7JFuKJg9QH#R^XRw|JZqQvwo;5_3~aK*fH3L2BMDc2Mri z$xOP%0_ws*3PDgt)?@?cqar;}W0M`~!don0=Di zu;eCY<`vm7Fff4Ac`LY7_`=1&%5$AX{1S`!jLZuLiC0;Y9&iix+jZJq=a#<2Eqz18 zV1w8OjtfGT9qf1bgs<}{T;fw$Aby!o^8$zF15W;a=}zehA=f#ju5e1tNV&r&+Mm;v zGsEOEpF#)ocV-4I=`U;y66zPlbyo;q6xY4LB65dC;DVsyH5R4soD5vj*Eywr6@${( zL1l3#Db_<$+)i4YD8&va0ec;y4=QJwA;k?SjS`pLJwe$WloT5nKwTjb zP_IX%BLqxA@-{e)LJ3d{2%NXAzyU`nZzI}NNGTlTOr|QK5K!5t0BX%vDiq{r=9PfT z`65$L)G`;9LgEOVl)$clw1keQtWuXPgHg-@$A}2Em-0!@_R#I7z znU`J!DuW=d;m?5duG12eQ{yvAlfbQ#TT+mIryg>@Qx956fl9zzoaw10K9I%?M|?ae zW5vf8DS>PS_0hns9Y}2kO4;BP3HBMfmpN>5^HWN5QtgU-7#J8pjf&zT1_p)?%#4hT zHyML_ixfC;0VJZKUH||9 literal 0 HcmV?d00001 diff --git 
a/prompt_bench/__pycache__/finetune_container.cpython-314.pyc b/prompt_bench/__pycache__/finetune_container.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e525721b3f018788d9f926439f06da4f658c72fd GIT binary patch literal 10114 zcmdPqAGAxHzH z2d+bgfDTP@h9E5z9kM~%;tWALAh8m;Sl%FAiwFjW6fOn^8HQLnMFyD3@(e*t%3!Ri zS7qmtpPZdqq>!ASSCW{Sms+HdlbM#9T$!Acs*szQmzbWKo0?amkd|Mh5Smwg!6Qge#+Gt!blNdbgGQO?A`z`)GF!1!5)0hC3; z7~+^LS{N8om_fl3$e6(x#2Cb+>lKCY_?Jc&V)RNMoyjv`O`FW`;88jJhv8Ck~ z2%3D0~@z5YkjgK$l zV_;wqV_;w?76!ZTI|DbP@kbCbAp}f)26-YGM8Gg7#7C^3e}Q$xz{D8>nBhz~BZx7E z1;#M}`Hz8tL7pLo4JH=A0b?O(MTQ`zAm$*J7;wP@5)WcUm=eUM#~#EU#1X`44z;g< zA&en_2c!-a$1zxxGccs^fkHZntAH_vA0{8h2+Kejj0D`zjj$Qv{vaM@22I{y%1))3 zIVlPy8L7yLQUQ`g6f$!Y(^K`TY{ zKRrFQ=oU+6URwSwJ~vN4*N{*@*LY7~M|am-T*akH1x5MEsl~;&n2SpDZt=VLJA1nZ zxq14y#s@ftc--QGq{!s_l+;^1L8W;mnYpR1MMe2VnoOY7T3nh_a*I7TwYWGjJ+&AV z#|jEXybKHsx46Nsj0gL*NEw#U(!j~)2@6-dccb?O0fQ}iS6CeH@QU}xb;aG_6r5l_ z!)1y729rH_}<_Zyul~@g`I_ut-=H8^udvJB5Hjsxzr!ar z!~8Oz@&yj%J1|2Pm)KonF)9K@_$`i%wD{!2ZOu5yG05lb={pqy}O$XXcd@se`06K!hfU z&;k+KAX%2A{QR6EP*yJj<-#Iv1_lOA#v)KIgOo$y7zURfU;<=E@g%UnzB7a~GP*N< zgisHJ#6GZs1wODb2#S7Shp;&q_=P`kLfBjk;xZq&A#5H7UV#t15H=r!pvVV)2wQ+b zMDl|mge}A%ApAiX!WLl=mG~eEVT&;cht_}g zNSPb}Dq3NM1C)kUR|O1VjCkc4!?2coF#Vt)1{s+y2rh>h!x(rNjF<|zVpu_7x1K2@gs5poTR3e&#@)o501Vs#33CtFGhA<|)u4D>hBF5w(7Er=NDtW;rZ5R^| zLlBn{QxInuBM(CmH<-o1!w|${#1sQc6JWQ%l4g(~Qc24U^IMP*k|+yYR2WH=6)q}* zB+3RC6-5$dhl`3KiE_Y1^<)dU!#H7KpCSYb`!MbRL9jznh#+y89(jfsA(&KvD2#=m zgCvMnDH+Cqs4WPXDiy{Ym!bnt4GhHVln!Gg+7_8G7NXV4hOrW@RxXSkNiC>|0|yxP z#(Nld5L1wRkOI8s<_Y5okOFH!A)qz4G$_`B6tSy;<|0{`M&w*XAjFlB!!1ZznL$$} zF_npdA*eJDqcTS{ow&Fhi_(j&xD*r=6p#yCD+Q1I;t~Z=*$b+f(OUyv!Tx?eFb#TO z-H5UoS#NnpYEdfK{*oe4Q#eHd+@e${&&=z)ha0v17I~8vi6dE1&`A7^vt|; zx5VUBs9tcrr=XFPnU|uQn_rq&0%<)YmgQ%rC={jYrsS9B<>V)(Wag!7LX1YQBCQnM ziV~ASt!st+G_W@mGE#F=6!J?Ut#X8Vh<->tYNg;;nwykb1kwZc4XFJCk%A~gRI^qJ z0jWj0DXC?d$*Br(wP1M#P&Euuk5Uy|DfmEZW(82q4DKC(f<3>WBr`X&Dzylz2Ubm6 
zDfr|EIVvQkBo>sU7Ab&gZiouh+S^LOH?bl!w=`FwIJK}eH7_|;0aC3)EC59csLuh8 zLQrIYt9UDg@Ql=wjMO4f+$ZJ~=PRUE6yz6`C?qPlyN9|d6qY9Dm1I_>LSs{pt4i1f zQd=wJ<%5E-G%v+UAw-ku7JETrQEFaE@hy(jip=7Y`26f5P;pcQO5s&(y1IFZxv5ps zy3nAq%`3}H$xPHs&d=3PFDSK5%*mR!R@pv4qaVH z>H!%BP8+(Qv{A*PtD93)#jUFgYLe-KBDP9ES2s7YLbo`zP!|$=AZbu^>4I8hRdT^4 ziA5!#ezx4X%VPhRiprFDe@NSg5*J?C5YxHxZR1f z$yuZXvV%Q2KQ}iqFQrHk)F%b?B2?f&0oJeJD+a|O+_gn^3=9n52Iw6|PWj!6ABwgZl!H%w-PQ8yrG+IJi69 zy18#~h=EvM-TXH=_&_Y*%N$Y=r5&E#d^b3xK!SdkIix>W%ki@{xPNzGWZ)3I!NK3p z*U2}b_6mn8qU#0r6|^6ww7>_Mt9C)dVu$fn4ZABW_V8|%(gLq5Ec!@Xw<|2#NL(~o z&nql?Paqw;8SX1oHl$wRwP|p>!On}^g%h#f;db4|_o9vO1#7jsCNez(PM@*%wY{DqV%%|aUhw% zf^I?(r!s>k*DtN$lKcXN#5_=YAUD4Z(gFb$1Q=zb9;oolFDSrSc!KMzTP&cWB*aaV z=@wsdVnIo1QEEJ-oG$_m=-y%~$}JLMU|{gmWW`#DSb+)-PUFCUcQ9C{2ii2vD-}(_}-c zh`?zGR2$u5D=taNFD(JnsYOMajF5^2RD3`Osdz!ABkB@w1_lOjJo$n{9a?;W#-+?= zSS$!xV|RhW>V~*9bXba$6+A4(gE}mAlVA7-w_v|*r|k^0`L;7{uX1Z#V9@}_aFrtV znjSIM4I0yC$w@bMB*#DdZrI3 zQ0Rfgk%#mdl^Hadia`BqKTSsPU>`U;fDMCWVo<^_0ufU|mV-tx8W_H?F|Z1>`GQqd z$wG<=eNY+!wL;=in~~B`X>85ZTP&V_ZvH4C21*s+n!y7ckvK=&zylu8kv6U%?jRn- zFa~Ir2sTF)B%u!)^pyl>{afr%r&XE5XBnbF^A0)+C6xuKMSA`PpdOP3C>m{1+IHaH zpt@#k5vXZYAKbbF=OJ(>3D%(inFVV5RCy6$7D`7IM{f~iAZW0#Dhy#D zc-RolZ|X?xb9Eh1r?9|Q9o|>O(MJRs4jO%|GC>%gnOA~lG$hzUpdBs+NXs9j6*Qhx zWkpykQjZecSpYY{K?Z<^I;(6D2H@~mP7&NkC_PP(5ukz6Dm{b|xSS7f=YuqX23D&q z2y609tU&L4g0zE%WUGP@+ChV}Xkn(Vs|)Ys=;oV-8~pV+~^qV-Mp9 z;|yXphgPNX3_&c)44SM}TA~{T6ppVsU1& zbADc0W_l6G@8BMoCU;RFs5%CfY(#hZf96C2vS--ryGnvBi{c2ua@%SG^%5 z{auus6&mrN+6#n1kpr#S2+z(ESFP>^ITcjJfw~V&j3S`=RHP#XOhHl|$Wj=FTF&~J zV6_VJJ*Zj*HG$EqRVPrGgNQvK%R#vkQmqQL`9rJ(`4xmg)L-VxGJ zJVGs&rmZ4F9AgkOY;{HkBO#N+7!W20ff_X|RR+Ns`Q;=uXZ8H5)cx`mpp96RNm_Uw zR!9R4!GPO(pn)AtmLgEY^A;<(`EZLRH8~^y7K>{|W(jEiAJXtC289=-(OTpVigXZh z2owVr;8N=g8v~zocRaX*-BfpngSVfni|c~0%@q#YJAA_ZSzTE(60h(nT;Nc+!@}9_ z)#TMtc8x_05=|hZK^PRqpx^{IoInFR4Gh>DPM{SF#N~-IAQ!^Y2oob0C^K+%gn%iC zsSss82||hg-a}qqB!Aq-_lv@+txs2at3F4veBCkQ;45 z1Sr74rDqYSXn>>_Pzaz04Tnu`eoARhs$EeY0|Nsn2^6<8GBA8#W@Kc%$>8#oLFq9A 
z-)#oLy9^R{8Dt(a@H}O(c)%%eom1)(r_>CK%bY3=Y!3t#8u)GqsyFc65VCIIzbPz! zK|`)TEYYxvH8l!#i;o~ih-S{J-aFUg1FTcHtPoF4{QuftnJc`(#^78 zn3x!aJ~J~g@qYlZK5)Th9=dy9@X5aHo^vxI<_3rAXJ)2IMz#-Z3^9y3j5qR&ZaBN# z@b$Y96n4YG>4tysjg-tA4sPE-Z40PrpjL(hBiuNUMIulsCPon?i@q?iF^WTF*cipX e3NbNqec)nX;pwQl!Yp|clr%pwGq6a3(ncoF$xK^(>xT9^56|U^c5Kj|XoF zFPP2b$ydS`#}LHk$?qXhBH$rdBIqGhA_SIW_Z0RJDG>p)IXp!@#7e}#Y)(&c4~Y^9 zd4^b)03(P?kx4~{SSb@mh)9Wa5SKVZ5Vtr(jzEb_5RW)R5HFZ58^kBh5X28=%LNID zGXx2O+48a6iVQ(QF#TYjFpL)@V$2*QD$Woj22z{DSfUUlF3u1n0pgV?#!3ZAT0}4~ zq;N4X$S}kzDl(Wr9Kj&Z5X7Vm#+p)9#?C&T3aNP|MU@H#`I&ho3TgR83MCn-3I#>^ zxdkN(NvV0s8M%o?*_nCi3dNPhC8@c3nvA!&6Z7)&OA<>m^Ye=RG?{J*g=7?^CZ+`B z=jXUqq$Zb^}&d)8#Ni9iD(PX^E5|CJup~-lQ(-ERKHAR#87GH8kYI1gb zdO>M?T2X51Ew18{#G;b;vYedUTb#uu`2}F6CgUxE%;Nam{FKz3_=2L;;?%qnO~zaN zPB7Ow=jWwmrfV|Z;tKQe@paD0OwB7v28B8ZgZvFjL(B{ejGq-4Kxrt9A&$wSg@GZ3 z85BB!j2VnUj6qC6%t0*rOv((JtS^}u7#MDGX66-?mc*xI78S9A_-sX~C8b4qzqoQT zi%X(G-ickwpvicPEiJz&H?gD`WSP<}L;Z~W+*JLd%;b#BRQ;6vlC;d6)MEXD%94!y zJbg&G#)HFEKQ}QmPp_cz7EgRUG(c11i+Dgz=Z3gAz96wA1CsPuic$;mZ!s5_6y4&3WP_BA3{^rUr3E>u(MkFFIXVhp@z^3E z1_lOA#v)-*Vgva|2^@0Z1PCTTnWfkdoS42dNHR)(1Q9oQ#6B=fGDjoeeJRfn#2&_N%%seqzz_q9U9dtnhLk8!o+aK5A!u%h zbmL(NVm4wz31uE6mw{3W*e(PS#1g3%g50cnU|getA+Fwg1`lDv4Tc%X>xLEaj}j9EzaEhy!?{Ypo ztw<5XQU(z!AOhs6TO6Pg6>L8@B%gzehFh%Q+VU2xxPUmKD7CmCKd(6T7CSVIZgGJ< z4c1c3%fP^(rlz8zqM`r+;4FWOJHDti4_Z(bSu!v%fSgkN0-W_fh%g99U+0&<$S>dE zaYIP%s*q-b_Z=RAe!nii%RDj-t{<4VS>Mn7}e&A+h zmHSl;O53X))t$r`4v9G`I$JRxwq$g+Vn!+ML3s^a+*^P{hN$8mTG$8i2Jr>)>vJeG zXbOOfd`Ogoa($6BD53>Hgd&Jg0uhK50IGPwX}d@bTsb7?=OvdG6{Y4SSKeaI%!8Ki z{E+fJGp|HPAv3RpaN%wPD)p1V{`k(|%*ZJF5ln)McM$ghulNU65CdAkgPD*L9?XOk z@n9yTj0ZCX7=%PW2!feH3~Ztwg+WZz0w3fbXh5=le!>iHk1+}`gtC`1=`oZtx#==6 z6qbasW2*v?3npw$8dz}-Ys5g>ci!3z3}NhkOy2qo44e#d3`2WScryq|j009s zp%l`bVVrQ2#X&`F3>&N<#8!C(F@{evfmR*z3}GBFz3Gxf=w%9HGJ#fa@(e+O$_#M~ z1zd3q1>AZJ1w1kEsv`iDfMAY*(nu8_Zw$OL31bUlHV4)63=9m`jPZ=6Or^~6@LoXy zUl@k~LnuEq4&)ga3i!kL0ziopYATG10ks;yoG^wk{xGI6<}j8p)*u#hX!YvHvT9{~<2(r8{V-Q<`co2ISQxHd( 
zNDyb3Xb@KzR}gm?cMwmQSP*ZRco1J0n?6T@L>NmDKeEmO$uP+PP=gn2KZ1yngRuiZ zMLdiHr4<>%B;uHY1k9l+59VrlhVa#(=0+)#JVPl{0edC`l*OOPpunKb01N+gCQ$fi zGRZT@GRT9W&vIzhE>yJyy6l>b)O6B;DP!81&Kw8IXS60 zdR$y(K0dw*Nr@%N8L7nz@S?9op(wSmG_|;-SRpg7B(*3nF(;=|N1-@hp|~_DH?yP! zRBh)b=2a>rmMG-sC8sK67IPJ6rsrj*WhN))mE=?^q$L)E>im+7#5{$f(!4y7j*^Vj zT!rG)qD+u+dR$fd!P%Jw1t1yK6u7g?GfOfQQY$ix!InVkV{pG&wYW;eFTW%Mq`o9y zAtfIw3hFTDCRW6k=NDzC78Pr zS{W&~*wVnwm|Mb$W$E!bi6x*a0j@>1B%>(5G(7{5%`4Ia)lm8%LK8%2 zfe27RcZ&@Y{NN@;5va9#i>)ZNxHPBa7F%LLL26#gE#~6V+#+3&i7X&XZ?UG7<`xud zNuonzX(zPfm%Rd=iXw2_~RBk$fV4?^jqx3p!i8mDRKn4zzJk3CnOv)Q;TnL zL4pu8qHv2N6*?{fZe-nJPb&rWkcw}yK^V8#p)JcIbC4mBz=yVf+(==3iwhC9 zx7Z*-c#9iCLz-Ejt~;ce1#4$PT3Zkj(&pj^4+4M&S0E!BMX?MF4B%S%Jh&J1QI0`X z=Q3a9LiZVJ2h5IJAGE$|;&oQsvW~Ka?hWo2_#*G{2~JS&&Y2UuKzw%0 zRX)uI_xtR8@CKFZf$}RHq0hO6;38TpT&{2!-ryJR;C>(=*x-4Cm#@L?hOB&p=MzEE z>2j0gE(VhjrEpGCQaxju?8D5y{1{vgc2E86eh>3>H+ z`i6km4FTyJqLMe{b#KUO-jGrMz|X)d^NoRnm+zwxgM#`8Q3g(d4-DMGTwf#@#FZDA zUlY^raJwO|I=^;i?dV1B(8v>Fu0;lp#;(Q<= zHbZeL_awHDybR(x-=!J2OuuL_h)7Jgn`C!cSb0I#Rbhh$p9cbxR|S+BJnsmJPRN|B zx4`nMkWPd54PDC}j#qSTCrHkSxhAN2z_`Kd0SDg%mKz*mGk9)r2v3NF5&{#{Zg9xX zsJ_9WzC!8-hvPqS7sy=|(r@s-!NPx?Md~7p)EtxfRp9@JBKJ{3ORM%=Mj_fy?wdr^&BkP)2Fha2I9S#O5x_ za+rzLU7qzYyQ;es$6*~u5qEjk<5D1@n{Jd0x0|HH;HlrD|&1lBx z%lMH2MBd<${=f`kJm44pzzSk~U}F#z{lE@paxk!QedGi&A?-j=K>;dWKp0d@gNLj{ zSU~MSq#-MKtE_+-sY?xNJA(8f;ULBsXv>3vA&ePTV}&sy)#vbzJ*;gZ4{A-Z#4!}G zhA{-d+foQa1sGfz-P{=%3fN*mE(A-2Cj@{-YQSs+0c)edYd}cb4qAUQFkovz1u=!O z!Su;9c$+gYgsY*{-RxoP0if;(!Wu9uh#53O39Fk?y7}0`g%#NrL^})-2GR^M@b*_2 zTNp!t1lSrBB8&l>ufrGuKqVZCN(P1?Hgiys&%nSy&}|$cNFgB25CiJ>fK3a*>WUDC z08pn0tO7vLh8R#z1xtl+gfNG& zg|LONhcENv0{f(T&r$3fqH>~jUhz_)RIFn1vEqfQvjud*pcH= zmcfXQd2ZRqd_4nnQpNcBo?LS zl@x>844K6x@%hzmQ2P#SB}j*Yt*xyl%Pm&0z%7=7{Gt-j@DOOGA|&dT53dI)+uy!QK2USwY!)7T8 zH3|^-St+QdXo`bJ&XF2@9-!71sL;K|2KIJw5vU9;$^f;K*vk`(KyB$;thw2snG;ai zTLh}Yz>Y4;1gYT(D$N5q9z4)iWC$`1)Qv3y&0Bz4ZJ8zUMX8A?mA6F7a&mIx!QL+| z0JT+9^Gi#>?Ig&62&fUO$z2o=G6B@lElL2fK#j;-{3ZD%i8+WFs9QV`0Z5Y>5=!6! 
zHc+xgv>%H>6{VV*8l<5Hn@ivYjgG;SUQr1H0|U4!Tmo*Wec@r?m!DC28PxK+!@||> z-RONmKx2*aiuBbwJ2wABumD?E-jc|;zFD_#}X=-|1*A#$BV>LQ2K0+S6Y7dWJ@ zaF{=VjhUHjNV&pc{e(lXhx-E)C#UEg9T}tbsl-3P(-RtTu z7u8+9apK_7*z1C>zrjZLdjBd0wdxO9>blu89k$hUw`Mw`<|yi}!G6@5!`+(cn7yLAD(i6- zMi8mN?QX$++!!Q&+?vOOowbS!9PxT3`MEi_*dXnuTP&V_ZvH5(G*Auz4-JB<1Olxz z&`b~qXckDHRhdDP8{AR@H_oa;;FF`^`9&RtlFEYAB0a~V^wQkayb_I!)SLoabr;xV z8mL2Dl9&m~ZwiokHqg`>v=D?$!>Mb=g7Zg}9d@Jr!GqCIgF$nepdu;6-`59Zc3NgS z*c6ZdiOb{{xFEG|%cm0OI_8q!>4f!1{(3RLESo2yU289xRj2F3yK1|OIU zCKMUMm_gIZutthJXdV_zLk(8V!<&%csyB!kI`hhcG{^`lN5N(z2t|f47FeYZnvcac z`+~y;mM})poJ;|!Ho%+22v-EL!rXwRDaINDZ#V~lVi2JX%u-|sgX=C}3j#w#(mgbaX7UZNV`1tr1E0kp>Lh5f&3#2%)EL8!T-1VyTRW%Ze zlR=f8X0d{*1~?^yCUU?$2*XN2RRcPMty!$ebc+?N2s|rWrQiu#f`F8lQu0%a6+jIa z@NkTkLX`|;?E^TQDxk~WVk%1V3n@y5=NO(_?4ao=(AWugQetstGDP<+w#;HsU+@+y zxV*W=}O3g^Q!zbFG)0H#Bik1s0JjEQU{51TP3Ft*E-jV)dPuflK;2r_`@vP-Ewih`6&n>tT6rXSD7< zs3ZYbB%meo*@Y2Or|QK5Kt|Kyv|6^Pm{T*9@G|OE-K9fM>2Tu0qhD$ z^fM>tWES->Fff2!4JvaQ7@n|jwtF^t_N27=gH_(*h>r)QlK6N{rCaRr@hSPq@$t8~ z;^QHsdhzi^pwKMx1tq~65CQ7j6*YtG6+(<9>p@f(7lC{X8MR~wExpLeOoEJ-u$5GT z27!w}vpo=3%R-j(=t0T_$Z{UN`61ga+>jb2dm_m&(s)hV!^M-jO7d`k+O zB53m()F{0rk4*u%fsDL%gfl(0#3vs-n8u7{quXDDJXv{LRqF3sFgzLz8bfxP1pz%3qC`7)3sCF|hD-R9#_~ Ryvf4e=K7JDfkg_O3;-^Mz#{+v literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/models.cpython-314.pyc b/prompt_bench/__pycache__/models.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18f651ce56e9c90e3bcbe5cb35d25e8c75703ba6 GIT binary patch literal 1023 zcmdPqMh1q*APx+(LKuvnEf^RW8W@5YN*Ll8f*3s+J(xQDxHAJl*GJ}%w&b!{FKz3VuiH) zB88;XyyT4B#G-74tY#77=su}nBq7}m{k~p7)w}!m||IjnAI2<7=lNm1-AHb|&d@#$Bk73)Jd`Wb1rn2Sq_ZgGP7@hO=_nGW&{47Zpw^Ga^96yz6` zq@O(l0&nbZv8LtZCzjk2OD`ym&rQwEFRF|$Ey>KutO5sdIwUw57;bTwq~;c+7A2OH z7NutDFfuUQVlTVH@>(ev8bf9ARel2 zC4-+P-z|>#_`Jm2)cE*YT=DU_`6;D2AU02Yd|_!~4pfFeK0Yn8sJJ92GcPqSKR*5z zdwhIKesX;LE&lj;r1+1IFXCWeU?>8m(jp!Zix))jfe3yOA;7@Ea7#2E z&n+m42Pa8r0P7W07J5NUAf 
zP`=C}azjPq0*llP*UKy#HzZ{myeEiWW|6reEY{%O5qz0N?1s2RgZBjC%PbN%6qGNp zNX$^a&Z1l-3GxNVDnCuOA~BF}xZ>l}N^F#BDXqC!q;dS-D>Vo7FMszPF3Voqgoi9%{cNl{{QNoHQULP35`W^$!MacNR< zNn&1dszO>(ey)OZeqMS}YH@L9eqLgZLQ-ZM*}QKdpj zMq-IVa$*5Uv{)gvEVZaoAtkf8Br`9$1m-A^6Z7*F9yLr+$jnR5DNO-6xHvUAKQ9Gj zL~2@EYH~?2#HifN^o$aPq*R5Z(xS}N6ot$@u%A+sOEUBGiuJg-Tq8n)9Gycv{rwbz zLVa9=t+;d*Jl*~LgIui?$}>`nQWJ|6l5-MEi&Kkr6p~T=pO%>ivKnd=C|CcMM=fra4H7{Ye_~bBsPLQ9DRHgQu1{a0)qTKJe@p4bQFSI z14BK7Ty+#2Lqk0LgFK@^OkY<&7aax1fPf(XfFMuD5LX?A{2~R{;1EYAAJ1S9kiEg7 zL2i!Dt_mO%Lp*|99bJO06g<-u;Fgu-mnLVV7AxeI=9FX>Sv7k5=WN0qPQYTjhXMaEE zAlDFAD+RyAT(FEnA~@nxQxppF3rayrJ3mhcq(vb&H8~?OFS9ro5{MwDrDPUC(tv_S zYDGa}UP`fkQEG8XQD$;Uv3_o1W?o5RW?r!-$gBECDOh zAQ@^uUWM_NHnA}F~cG6~2`9dL+v1^fG1adG(vxCS|fc!s$uK$38< z6<4$$IMJl0KteV@4dEnkeoag+DNW4DsRUcAqmZAMssPPT1*t^}Ihml8mYZ0iU}T_B zo?nzwtdN?RoDs{#<>=<->Kx+gq7dL85bEOy$}k|?!ZY#}(m_F`P@JEeT9T2Om#(8w zo{_JRlV6-#j4YU61QIV!RY*$BNG!|DF9H=6MXBkam`Y8F<>K;nb@p)d^9=R{nd^~X z4hoy{{G#k)D}}t&a)q=~aBkC4$W6>kNi0bPWuyFz%%n_E_R&#D%uUTp0p&DM@dM9D z#U+U)r69HGMTvPOpzM=gl$fidP+U@)QVA-hax=lDnT|qgNwQun7gvD4kEe5_0xTtS zMfdJ3V%sS1$T%t_2kFHKAb z6@vMos0SO7oS%|f46;2q($@*(0R4a%-21%E%ENGq=B$kbv`-t)@`Ck;?3 z3%9qRC^I*)C^M&0AyJ_ywKzYg6cQvUsl}P;dEnv&oNp7$6N^&wQj3cfQc{z1K$$Ev zKTn}Nvm`?yFCVGcfE1wiv0Pj}u72(z9tt6jLGG?0Rtkou2D(P3u!K>PUy_)kr{I*T zP*hrykpnU}F)6>aL?OAf1XPEBaz$QoW=W+&epzY}D6k81QcJ)N(5p%f1{e65RgkQk zmY5G$@~P#%d-&d)1J z%`2(((`33O9USWG>lox21x~Z^!I8lsuDeE|J`WgATsrp5k$r+ic`YHJ(X_+~x#rg%6B^mj7`UOS#xdkQhNvV0s8Tyb~EVBw6 z=J61zV!eXOTO2mI`6;D2sdh!|3=9mQdb(JQfq~%zGb1D82PPT$SjGzs(jRP?<%KRW LNEfj(FfafB1(~lc literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/vllm_client.cpython-314.pyc b/prompt_bench/__pycache__/vllm_client.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f19e179c4b831c8616d7f6a4754ab9c6d21a62ad GIT binary patch literal 4687 zcmdPqvR7&N7o_Gndg>q~#YWl==Ah zsuwFb26*aeGT!1&%*)F!Ni50C&nx!RWW2=^oSKuC4l$6yAsNa5Q*2<8f$?)4*mJQA 
zC5%A~B}{PwCCovLB`iTqLCircL99V+LG1d>K^)=?L7d_YIRZIMC9FYQ;tWCDAYKVu z5KjE- zE-oo50vU6QIWw=Mh?9YV;TBs_YDsBP-Ypit{Jhka44RC$*wXTgauZ96LG~)$ve3`S z&rQ`Y%1qA4Ow~`xFGsJA^oe8$eIX_RqTn+q zNWkF(VlXo>Fo3+s%)r3=*#~S|3`iV|13)1FW+4bgh9Jf;MjnPBrZ6TR1|ya*<{)Ns zsJbwQJSdBSAqW(PtX101MX8A;sS1g(?2??HmzSDclA3~%w)CplGfGMdtn~G7FEkI9Wk5?`AbY^+O98AOcly!?l_ji2++gSACRW6kCuWuu@iH(l6!C!weh>jF zsWcgj1VPaYPDxL$tWI+;AV4J=(@Ukj=1Q832KY*weCSV@KiSVdkU|@hc zk@s^KBd8E16vZac_zq*#fwDkU0Ta>sm@I@C7*di!8BCfXj3EGIKgb9q9Kepmfii;_ zVFhvla~N{~D91r%V3azPks<*yI|f{BfP?}-p$K9i;UFfMY4Qv~%*qVX3`p(=r3oap zAf_Ti3@Cs>Oa_Eb?;22I1}Aw|Bjy5@7;&%y1_p*O7MQ9GMg@i#P!xituvIo}3@I!i z7t1rqGWfV;GFUUqGGsDpvQ?P|f{32MXs8_{>&`~7FZillwWj(bg6rL|Z1T$Z++z;Q!Z`ns^%bz!v&!fJO!q^5gM^uC~Ac16Verij!BRt6FEFI)_~ zg8g1yURT7muJLH!5YSl>vBGOZ;srj74(2<2QZtM%^T}V}kpJ<8lR-rCy0H31Vf8Nz zjJ%>Z1w=kDGxCc5Dh4H<9Xd{03BF&}1RbY^0{#a)t`Taa3mSW;S) z3TrMf+~NWi6eaoDsd>ej3<79H4=DYEiylxuZD7Dz^f19%5kVY5O!}|yLd96F#<9I7*p0ht%XiOf@G(B!JJ3r@{TQAkvP z#22XY1eE}w=4ehTxTUF(n3tjeX_J6U4_Jm$C`qj-(JL|oMWiW+07Wl2UaRCmW@hH4 zBN?h%te~NqqL7@CSX8W8#gAqOs4<&WoS%1#Jvk#kGdZ>R77NI9P4*&Cxm*NlU)^HP zNzH?lBP<2^#U-}{iV`!6Q{&U}i{gt*5=%;pi!2!!7&JK`#Q@kvV7I}Yb&I_qu`(w= zF{MZkln_}`5=#<`L5V?4O-)4wUM6rr{ho?apTh*f39%ZSa6Ygx@QO?by}=_gA?*f_ z$930kz%PS2(t_^Rm0k52l^Had!Pyg%>4<6;Sb>@aphiLi1E@JD z3u^4iPACCW2#0_i0m4v+@O}nq134rBB#MRup!oz@7?y(N8G=C0WU3N#&dD!M1*bTV zkdOdqGf&S?lL?XtSd&5WD;dGj4Q?8%fy?wFS5V{%AQk3dg&_A7gX-o6h6fxx{XCsK z*Eysva!6g{kU{u@vfu>ys)$fQW``P_BA{>-nPCE^5DtNb49G#?q6$=+fPzz*L6fmc z$~6yE)q<+>9D=K>gWMElNi0e)zQvZGl$DxXLPC;r0y!U4 zhCz~?C@645XQYAp8=@<~Ats7&A;@(g3`%+Ipw5OGICF+E1i;)5l7(W7R3?Wtl_})s zDWq0pmgxCuGJ?~SCKEWB75Rc(2C_*_3!K<)am2@i6k;^~A%b9!fQoZSqT}l1f;RIt z8NrFzPm>2^LSAAns6)mTAD^3_Qknx|^Tfv&mL}#vW%%Rc(=v;SOF%7}y!`n1TkP@i zDf!9q@kOBKNs$4l^mPOgptN7)1!95n-7V30aM75V9G_THQk0ogT9R5E9}h_*pxy@9 z4@IDw3`~I1ZgDUJ14A3bMH$?%#rJP{4;X?kIuK7ec~81wlCDR}NlQg%5H( ztXe0SAysaCGPrX7D#OGo`;~)0i8T|?;09w+5Xg(*szDpX0{Z~udWbEcY<`Qw z1`?QdMWD(YlopE385kHoFf%eT-ek~w%AoX^f$ug0_gx0@#|%6VI0deAN?qcVnqhI7 
aQ>B6Jftch42I22)CXAw=8B7=<=?wr^G2Q+E literal 0 HcmV?d00001 diff --git a/prompt_bench/__pycache__/vllm_container.cpython-314.pyc b/prompt_bench/__pycache__/vllm_container.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed66520b0400f43c289bca09e5014dcab27f961a GIT binary patch literal 3102 zcmdPq+|;}h zg|z%4g)$!>Up-C6Til6xdHE%YC7Jno#eP*>WjQ&yx=E>d$r)7=Ahte;(9JJM%}dO* zDl^bCwA3?52006aL9PQiotc4w@v{!tvtbNzOcpH+3@OYE3^ELXj2VnUj6qC6%t0(c ztU*jcY(eZn96_A=tjY|UTrZgz7#MD`=H{oQ=GM?Zfb6RQDuB-NoG!F71+7ASkrRy6H9Kf6{VJx7UkVy@ypLkUCE%yc#AD9 zzbH4cq!{EzrCZke8Tq-X`bC+^8JVg2DfuO7nK`M&`URCG8Tono1x5L}1tsxdFM~ZB z4-Y)Og34Pw@$t|AN{x>%5@cXt;A3E5D0T*W_B(?%qZi{xF!_L+?*l7{`+<#to9_cV zn90E)ApC(7%;aK_Rr*sBZ;Isn` zycm!?7zeR}0x^tSh|7p61{9bO72H8QVGKMBModAxVGKchF|1%= z1_p*OW>|^};tvwg6DeScVS~v9aKcy!I!G{#!31iqJVOjOOvVJtkY|YDfif8aK;Z)8 zKxsvWFqR;pAfX`PAQ5wr?-&>u3RuHf0|cN7kf=BYi*g2r6iraF2@(xs4G=<-gfas} zp$sGn<`$5D3)o`BVNzl6+>^mbz?WiStN{`*eFz$#Z9(EXQV5lZFba}TX3&&Olx1XK z2rfx1Dp5!TXEdY?S)5u_mYJ8XP?C|Vke*qVnx_Cx=z3gSjz#IkR$K}S3J?)11&`A7 z^vt|;x5VUBh>$``W>IQ#Nq$kKLSABSs)9yQYEEKFW?8C2Nj^jcD8Fdxfeis=B`XDw z{NfS?5EG;%DKjqxA_=w=q!^|mu_OcLNRaDrTb-F&3^4+GrnFLUD@sfTl_m=LX$tNE zp$d?U3$nhnI8{NzK-W-HkLwm&3b>@Y#avXHSH+~8QpKjL3-VKyw61PSYFTD-s%>6b zW=dwFUUGh}etJQvZDLMN6_>7VW5`^ zRjGla6cjw5!V?x;sYMFqnI##J@V8P>Ee>(hWWL3hoLEp&T9g`}Us_U7T5^k}B(x*na$3F;R(q;7D?f^{q1;o!c&qjZ@=`38sN4Gz8! 
znxgz{4enob7+AU6y&JtR2w3b;y~5&fhlRJr`wp*oe_U7G4EGH#J4#NNUf~UFaJ#|I zd!1e4BD=&4$1CizHv|keh;5hMD0|(|<)Wd>Wka_M2JRR5JUW=~@JY=uzRV|ofkXZd z3m3#_ofT>8b64hG*K@q6=XhDq`GE6PJ&$WFo<*Qi4W<4B6|dm>QwHpN?DeNUqcVdg za}lTrEdoV;5icmzK_#vxV-Y{7LIPKH;IIW(QeeUcBq0no_B#U?quECgF~I~(K^%f8 z!WclU7)Z^d1g?1yHBJDuJ%g+`h!Iwk$TP%%oCTH)0HrxF3qdF{fU5r(4w#4@q$$8= z4mFi1{p>*;NG7m=8xy3Nz!}69#1X`eWC|OGDKIB8VD&K%!pA|p$_$!(Rl30?`2`Az zc_|7-sk!-}QaB?OQrv@E6S;{+*@!|yuSy)Gya2QKC<4{_w^%@VGsI1ksR&es6-k1c zModMyRZO~Rx7hPiOUm<$vTt#vWELmq=jEj)m)v4YD#}brPpwkIC@S&`3Q|*$+?C>| z$yy`}N-=UELIFg8@_dmrhy_YT;M4#~J@Ozi5CKj-Mc~x)1eUBdudry{Vc~1{Yx0}m zF(YkB-UVUP11T4R!mqMKAjDG^xUA8;AZ&R;a#hIR3;sYt@IO5|$)ku6i#KT}8f^rtPlqmxFs|eM@w>WHa^HWN5QtgV& z7#J8p`KZ{Ofq~%zGb1D8O$O7a3`&m~_--=@KHwC%&M9??Q)-6AWlog_wg-Z;4SY9v V)f>3JvVq!i-(wiL7|p;=2LOPL-hKc8 literal 0 HcmV?d00001 diff --git a/prompt_bench/batch_bill_summarizer.py b/prompt_bench/batch_bill_summarizer.py new file mode 100644 index 0000000..b8b08f3 --- /dev/null +++ b/prompt_bench/batch_bill_summarizer.py @@ -0,0 +1,238 @@ +"""Submit an OpenAI Batch API bill-summarization job over compressed text. + +Reads the first N bills from a CSV with a `text_content` column, compresses +each via `bill_token_compression.compress_bill_text`, builds a JSONL file of +summarization requests, and submits it as an asynchronous Batch API job +against `/v1/chat/completions`. Also writes a CSV of per-bill pre/post- +compression token counts. 
+""" + +from __future__ import annotations + +import csv +import json +import logging +import re +import sys +import tomllib +from os import getenv +from pathlib import Path +from typing import Annotated + +import httpx +import typer +from tiktoken import Encoding, get_encoding + +from python.prompt_bench.bill_token_compression import compress_bill_text + +_PROMPTS_PATH = Path(__file__).resolve().parents[2] / "config" / "prompts" / "summarization_prompts.toml" +_PROMPTS = tomllib.loads(_PROMPTS_PATH.read_text())["summarization"] +SUMMARIZATION_SYSTEM_PROMPT: str = _PROMPTS["system_prompt"] +SUMMARIZATION_USER_TEMPLATE: str = _PROMPTS["user_template"] + +logger = logging.getLogger(__name__) + +OPENAI_API_BASE = "https://api.openai.com/v1" + + +def load_bills(csv_path: Path, count: int = 0) -> list[tuple[str, str]]: + """Return (bill_id, text_content) tuples with non-empty text. + + If `count` is 0 or negative, all rows are returned. + """ + csv.field_size_limit(sys.maxsize) + bills: list[tuple[str, str]] = [] + with csv_path.open(newline="", encoding="utf-8") as handle: + reader = csv.DictReader(handle) + for row in reader: + text_content = (row.get("text_content") or "").strip() + if not text_content: + continue + bill_id = row.get("bill_id") or row.get("id") or f"row-{len(bills)}" + version_code = row.get("version_code") or "" + unique_id = f"{bill_id}-{version_code}" if version_code else bill_id + bills.append((unique_id, text_content)) + if count > 0 and len(bills) >= count: + break + return bills + + +def safe_filename(value: str) -> str: + """Make a string safe for use as a filename or batch custom_id.""" + return re.sub(r"[^A-Za-z0-9._-]+", "_", value).strip("_") or "unnamed" + + +def build_request(custom_id: str, model: str, bill_text: str) -> dict: + """Build one OpenAI batch request line.""" + return { + "custom_id": custom_id, + "method": "POST", + "url": "/v1/chat/completions", + "body": { + "model": model, + "messages": [ + {"role": "system", "content": 
SUMMARIZATION_SYSTEM_PROMPT}, + {"role": "user", "content": SUMMARIZATION_USER_TEMPLATE.format(text_content=bill_text)}, + ], + }, + } + + +def write_jsonl(path: Path, lines: list[dict]) -> None: + """Write a list of dicts as JSONL.""" + with path.open("w", encoding="utf-8") as handle: + for line in lines: + handle.write(json.dumps(line, ensure_ascii=False)) + handle.write("\n") + + +def upload_file(client: httpx.Client, path: Path) -> str: + """Upload a JSONL file to the OpenAI Files API and return its file id.""" + with path.open("rb") as handle: + response = client.post( + f"{OPENAI_API_BASE}/files", + files={"file": (path.name, handle, "application/jsonl")}, + data={"purpose": "batch"}, + ) + response.raise_for_status() + return response.json()["id"] + + +def prepare_requests( + bills: list[tuple[str, str]], + *, + model: str, + encoder: Encoding, +) -> tuple[list[dict], list[dict]]: + """Build (request_lines, token_rows) from bills. + + Each bill is compressed before being turned into a request line. + Each `token_rows` entry has chars + token counts for one bill so the caller + can write a per-bill CSV. + """ + request_lines: list[dict] = [] + token_rows: list[dict] = [] + for bill_id, text_content in bills: + raw_token_count = len(encoder.encode(text_content)) + compressed_text = compress_bill_text(text_content) + compressed_token_count = len(encoder.encode(compressed_text)) + token_rows.append( + { + "bill_id": bill_id, + "raw_chars": len(text_content), + "compressed_chars": len(compressed_text), + "raw_tokens": raw_token_count, + "compressed_tokens": compressed_token_count, + "token_ratio": (compressed_token_count / raw_token_count) if raw_token_count else None, + }, + ) + safe_id = safe_filename(bill_id) + request_lines.append(build_request(safe_id, model, compressed_text)) + return request_lines, token_rows + + +def write_token_csv(path: Path, token_rows: list[dict]) -> tuple[int, int]: + """Write per-bill token counts to CSV. 
def write_token_csv(path: Path, token_rows: list[dict]) -> tuple[int, int]:
    """Write per-bill token counts to CSV.

    Returns (raw_total, compressed_total)."""
    with path.open("w", newline="", encoding="utf-8") as handle:
        writer = csv.DictWriter(
            handle,
            fieldnames=["bill_id", "raw_chars", "compressed_chars", "raw_tokens", "compressed_tokens", "token_ratio"],
        )
        writer.writeheader()
        writer.writerows(token_rows)
    raw_total = sum(row["raw_tokens"] for row in token_rows)
    compressed_total = sum(row["compressed_tokens"] for row in token_rows)
    return raw_total, compressed_total


def create_batch(client: httpx.Client, input_file_id: str, description: str) -> dict:
    """Create a batch job and return its full response payload."""
    response = client.post(
        f"{OPENAI_API_BASE}/batches",
        json={
            "input_file_id": input_file_id,
            "endpoint": "/v1/chat/completions",
            "completion_window": "24h",
            "metadata": {"description": description},
        },
    )
    response.raise_for_status()
    return response.json()


def main(
    csv_path: Annotated[Path, typer.Option("--csv", help="Bills CSV path")] = Path("bills.csv"),
    output_dir: Annotated[Path, typer.Option("--output-dir", help="Where to write JSONL + metadata")] = Path(
        "output/openai_batch",
    ),
    model: Annotated[str, typer.Option(help="OpenAI model id")] = "gpt-5-mini",
    count: Annotated[int, typer.Option(help="Max bills to process, 0 = all")] = 0,
    log_level: Annotated[str, typer.Option(help="Log level")] = "INFO",
) -> None:
    """Submit an OpenAI Batch job of compressed bill summaries."""
    logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s %(name)s: %(message)s")

    api_key = getenv("CLOSEDAI_TOKEN") or getenv("OPENAI_API_KEY")
    if not api_key:
        message = "Neither CLOSEDAI_TOKEN nor OPENAI_API_KEY is set"
        raise typer.BadParameter(message)
    if not csv_path.is_file():
        message = f"CSV not found: {csv_path}"
        raise typer.BadParameter(message)

    output_dir.mkdir(parents=True, exist_ok=True)

    # Fix: count == 0 means "all bills"; the original logged "Loading 0 bills".
    if count > 0:
        logger.info("Loading up to %d bills from %s", count, csv_path)
    else:
        logger.info("Loading all bills from %s", csv_path)
    bills = load_bills(csv_path, count)
    if len(bills) < count:
        logger.warning("Only %d bills available (requested %d)", len(bills), count)

    encoder = get_encoding("o200k_base")
    request_lines, token_rows = prepare_requests(bills, model=model, encoder=encoder)

    token_csv_path = output_dir / "token_counts.csv"
    raw_tokens_total, compressed_tokens_total = write_token_csv(token_csv_path, token_rows)
    logger.info(
        "Token counts: raw=%d compressed=%d ratio=%.3f -> %s",
        raw_tokens_total,
        compressed_tokens_total,
        (compressed_tokens_total / raw_tokens_total) if raw_tokens_total else 0.0,
        token_csv_path,
    )

    jsonl_path = output_dir / "requests.jsonl"
    write_jsonl(jsonl_path, request_lines)
    logger.info("Wrote %s (%d bills)", jsonl_path, len(request_lines))

    headers = {"Authorization": f"Bearer {api_key}"}
    # Long timeout: the JSONL upload can be large.
    with httpx.Client(headers=headers, timeout=httpx.Timeout(300.0)) as client:
        logger.info("Uploading JSONL")
        file_id = upload_file(client, jsonl_path)
        logger.info("Uploaded: %s", file_id)

        logger.info("Creating batch")
        batch = create_batch(client, file_id, f"compressed bill summaries x{len(request_lines)} ({model})")
        logger.info("Batch created: %s", batch["id"])

    # Persist everything needed to poll/debug the batch later.
    metadata = {
        "model": model,
        "count": len(bills),
        "jsonl": str(jsonl_path),
        "input_file_id": file_id,
        "batch_id": batch["id"],
        "raw_tokens_total": raw_tokens_total,
        "compressed_tokens_total": compressed_tokens_total,
        "batch": batch,
    }
    metadata_path = output_dir / "batch.json"
    metadata_path.write_text(json.dumps(metadata, indent=2))
    logger.info("Wrote metadata to %s", metadata_path)


def cli() -> None:
    """Typer entry point."""
    typer.run(main)


if __name__ == "__main__":
    cli()
__future__ import annotations + +import re + +STATES = ( + "Alabama", + "Alaska", + "Arizona", + "Arkansas", + "California", + "Colorado", + "Connecticut", + "Delaware", + "Florida", + "Georgia", + "Hawaii", + "Idaho", + "Illinois", + "Indiana", + "Iowa", + "Kansas", + "Kentucky", + "Louisiana", + "Maine", + "Maryland", + "Massachusetts", + "Michigan", + "Minnesota", + "Mississippi", + "Missouri", + "Montana", + "Nebraska", + "Nevada", + "New Hampshire", + "New Jersey", + "New Mexico", + "New York", + "North Carolina", + "North Dakota", + "Ohio", + "Oklahoma", + "Oregon", + "Pennsylvania", + "Rhode Island", + "South Carolina", + "South Dakota", + "Tennessee", + "Texas", + "Utah", + "Vermont", + "Virginia", + "Washington", + "West Virginia", + "Wisconsin", + "Wyoming", + "Puerto Rico", + "Guam", + "American Samoa", + "District of Columbia", + "US Virgin Islands", +) +STATE_PATTERNS = [(re.compile(re.escape(state), re.IGNORECASE), state) for state in STATES] + + +def normalize_state_names(text: str) -> str: + """Replace any casing of state names with title case.""" + for pattern, replacement in STATE_PATTERNS: + text = pattern.sub(replacement, text) + return text + + +def strip_number_commas(text: str) -> str: + """Remove commas from numeric thousands separators.""" + return re.sub(r"(\d{1,3}(?:,\d{3})+)", lambda match: match.group().replace(",", ""), text) + + +def strip_horizontal_rules(text: str) -> str: + """Remove ASCII horizontal-rule lines built from underscores, dashes, equals, or asterisks.""" + return re.sub(r"^\s*[_\-=\*]{3,}\s*$", "", text, flags=re.MULTILINE) + + +def collapse_double_dashes(text: str) -> str: + """Replace ``--`` em-dash stand-ins with a single space so they don't tokenize oddly.""" + return text.replace("--", " ") + + +def collapse_inline_whitespace(text: str) -> str: + """Collapse runs of horizontal whitespace (spaces, tabs) into a single space, leaving newlines intact.""" + return re.sub(r"[^\S\n]+", " ", text) + + +def 
collapse_blank_lines(text: str) -> str: + """Collapse three-or-more consecutive newlines down to a blank-line separator.""" + return re.sub(r"\n{3,}", "\n\n", text) + + +def trim_line_edges(text: str) -> str: + """Strip spaces immediately before and after newline characters on every line.""" + text = re.sub(r" +\n", "\n", text) + return re.sub(r"\n +", "\n", text) + + +def shorten_section_markers(text: str) -> str: + """Rewrite ``Sec. 12.`` style section headings as the more compact ``SEC 12``.""" + return re.sub(r"(?i)sec\.\s*(\d+[a-zA-Z]?)\.", r"SEC \1", text) + + +def unwrap_parens(text: str) -> str: + """Strip parentheses around short alphanumeric labels like ``(a)`` or ``(12)``.""" + return re.sub(r"\(([a-zA-Z0-9]+)\)", r"\1", text) + + +def strip_typeset_quotes(text: str) -> str: + """Remove the `` and '' typeset quote markers used in the GPO bill format.""" + return text.replace("``", "").replace("''", "") + + +def normalize_usc_acronym(text: str) -> str: + """Collapse ``U.S.C.`` to ``USC`` to save tokens on the common citation.""" + return text.replace("U.S.C.", "USC") + + +def normalize_us_acronym(text: str) -> str: + """Normalize the various ``U.S.``/``U. S.`` spellings to the bare ``US`` form.""" + for acronym in ("U. S.", "u. s.", "U.S. ", "u.s. "): + text = text.replace(acronym, "US ") + return text + + +def collapse_ellipses(text: str) -> str: + """Collapse runs of two-or-more periods (``...``, ``....``) down to a single period.""" + return re.sub(r"\.{2,}", ".", text) + + +COMPRESSION_STEPS = ( + strip_horizontal_rules, + collapse_double_dashes, + collapse_inline_whitespace, + collapse_blank_lines, + trim_line_edges, + shorten_section_markers, + unwrap_parens, + strip_typeset_quotes, + normalize_usc_acronym, + normalize_us_acronym, + strip_number_commas, + collapse_ellipses, + normalize_state_names, +) + + +def compress_bill_text(text: str) -> str: + """Apply lossless-ish whitespace and boilerplate compression to bill text. 
+ + Runs every transform in :data:`COMPRESSION_STEPS` in order, then strips + leading/trailing whitespace from the final result. + """ + for step in COMPRESSION_STEPS: + text = step(text) + return text.strip() diff --git a/prompt_bench/compresion_test.py b/prompt_bench/compresion_test.py new file mode 100644 index 0000000..f246f14 --- /dev/null +++ b/prompt_bench/compresion_test.py @@ -0,0 +1,241 @@ +"""Run two interactive OpenAI chat-completion sweeps over bill text. + +Reads the first N bills from a CSV with a `text_content` column and sends two +sweeps through `/v1/chat/completions` concurrently — one with the raw bill +text, one with the compressed bill text. Each request's prompt is saved to +disk alongside the OpenAI response id so the prompts and responses can be +correlated later. +""" + +from __future__ import annotations + +import csv +import json +import logging +import re +import sys +import time +import tomllib +from concurrent.futures import ThreadPoolExecutor, as_completed +from os import getenv +from pathlib import Path +from typing import Annotated + +import httpx +import typer + +from python.prompt_bench.bill_token_compression import compress_bill_text + +_PROMPTS_PATH = Path(__file__).resolve().parents[2] / "config" / "prompts" / "summarization_prompts.toml" +_PROMPTS = tomllib.loads(_PROMPTS_PATH.read_text())["summarization"] +SUMMARIZATION_SYSTEM_PROMPT: str = _PROMPTS["system_prompt"] +SUMMARIZATION_USER_TEMPLATE: str = _PROMPTS["user_template"] + +logger = logging.getLogger(__name__) + +OPENAI_API_BASE = "https://api.openai.com/v1" +DEFAULT_MODEL = "gpt-5.4-mini" +DEFAULT_COUNT = 100 +SEED = 42 + + +def load_bills(csv_path: Path, count: int) -> list[tuple[str, str]]: + """Return up to `count` (bill_id, text_content) tuples with non-empty text.""" + csv.field_size_limit(sys.maxsize) + bills: list[tuple[str, str]] = [] + with csv_path.open(newline="", encoding="utf-8") as handle: + reader = csv.DictReader(handle) + for row in reader: + 
text_content = (row.get("text_content") or "").strip() + if not text_content: + continue + bill_id = row.get("bill_id") or row.get("id") or f"row-{len(bills)}" + version_code = row.get("version_code") or "" + unique_id = f"{bill_id}-{version_code}" if version_code else bill_id + bills.append((unique_id, text_content)) + if len(bills) >= count: + break + return bills + + +def build_messages(bill_text: str) -> list[dict]: + """Return the system + user message pair for a bill.""" + return [ + {"role": "system", "content": SUMMARIZATION_SYSTEM_PROMPT}, + {"role": "user", "content": SUMMARIZATION_USER_TEMPLATE.format(text_content=bill_text)}, + ] + + +def safe_filename(value: str) -> str: + """Make a string safe for use as a filename.""" + return re.sub(r"[^A-Za-z0-9._-]+", "_", value).strip("_") or "unnamed" + + +def run_one_request( + client: httpx.Client, + *, + bill_id: str, + label: str, + bill_text: str, + model: str, + output_path: Path, +) -> tuple[bool, float, str | None]: + """Send one chat-completion request and persist prompt + response. + + Returns (success, elapsed_seconds, response_id). 
+ """ + messages = build_messages(bill_text) + payload = { + "model": model, + "messages": messages, + "seed": SEED, + } + start = time.monotonic() + record: dict = { + "bill_id": bill_id, + "label": label, + "model": model, + "seed": SEED, + "input_chars": len(bill_text), + "messages": messages, + } + try: + response = client.post(f"{OPENAI_API_BASE}/chat/completions", json=payload) + response.raise_for_status() + body = response.json() + except httpx.HTTPStatusError as error: + elapsed = time.monotonic() - start + record["error"] = { + "status_code": error.response.status_code, + "body": error.response.text, + "elapsed_seconds": elapsed, + } + output_path.write_text(json.dumps(record, ensure_ascii=False, indent=2)) + logger.exception("HTTP error for %s/%s after %.2fs", label, bill_id, elapsed) + return False, elapsed, None + except Exception as error: + elapsed = time.monotonic() - start + record["error"] = {"message": str(error), "elapsed_seconds": elapsed} + output_path.write_text(json.dumps(record, ensure_ascii=False, indent=2)) + logger.exception("Failed: %s/%s after %.2fs", label, bill_id, elapsed) + return False, elapsed, None + + elapsed = time.monotonic() - start + response_id = body.get("id") + record["response_id"] = response_id + record["elapsed_seconds"] = elapsed + record["usage"] = body.get("usage") + record["response"] = body + output_path.write_text(json.dumps(record, ensure_ascii=False, indent=2)) + logger.info("Done: %s/%s id=%s in %.2fs", label, bill_id, response_id, elapsed) + return True, elapsed, response_id + + +def main( + csv_path: Annotated[Path, typer.Option("--csv", help="Bills CSV path")] = Path("bills.csv"), + output_dir: Annotated[Path, typer.Option("--output-dir", help="Where to write per-request JSON")] = Path( + "output/openai_runs", + ), + model: Annotated[str, typer.Option(help="OpenAI model id")] = DEFAULT_MODEL, + count: Annotated[int, typer.Option(help="Number of bills per set")] = DEFAULT_COUNT, + concurrency: 
Annotated[int, typer.Option(help="Concurrent in-flight requests")] = 16, + log_level: Annotated[str, typer.Option(help="Log level")] = "INFO", +) -> None: + """Run two interactive OpenAI sweeps (compressed + uncompressed) over bill text.""" + logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s %(name)s: %(message)s") + + api_key = getenv("CLOSEDAI_TOKEN") or getenv("OPENAI_API_KEY") + if not api_key: + message = "Neither CLOSEDAI_TOKEN nor OPENAI_API_KEY is set" + raise typer.BadParameter(message) + if not csv_path.is_file(): + message = f"CSV not found: {csv_path}" + raise typer.BadParameter(message) + + compressed_dir = output_dir / "compressed" + uncompressed_dir = output_dir / "uncompressed" + compressed_dir.mkdir(parents=True, exist_ok=True) + uncompressed_dir.mkdir(parents=True, exist_ok=True) + + logger.info("Loading %d bills from %s", count, csv_path) + bills = load_bills(csv_path, count) + if len(bills) < count: + logger.warning("Only %d bills available (requested %d)", len(bills), count) + + tasks: list[tuple[str, str, str, Path]] = [] + for bill_id, text_content in bills: + filename = f"{safe_filename(bill_id)}.json" + tasks.append((bill_id, "compressed", compress_bill_text(text_content), compressed_dir / filename)) + tasks.append((bill_id, "uncompressed", text_content, uncompressed_dir / filename)) + + logger.info("Submitting %d requests at concurrency=%d", len(tasks), concurrency) + + headers = {"Authorization": f"Bearer {api_key}"} + completed = 0 + failed = 0 + index: list[dict] = [] + wall_start = time.monotonic() + with ( + httpx.Client(headers=headers, timeout=httpx.Timeout(300.0)) as client, + ThreadPoolExecutor( + max_workers=concurrency, + ) as executor, + ): + future_to_task = { + executor.submit( + run_one_request, + client, + bill_id=bill_id, + label=label, + bill_text=bill_text, + model=model, + output_path=output_path, + ): (bill_id, label, output_path) + for bill_id, label, bill_text, output_path in tasks + } + for 
future in as_completed(future_to_task): + bill_id, label, output_path = future_to_task[future] + success, elapsed, response_id = future.result() + if success: + completed += 1 + else: + failed += 1 + index.append( + { + "bill_id": bill_id, + "label": label, + "response_id": response_id, + "elapsed_seconds": elapsed, + "success": success, + "path": str(output_path), + }, + ) + wall_elapsed = time.monotonic() - wall_start + + summary = { + "model": model, + "count": len(bills), + "completed": completed, + "failed": failed, + "wall_seconds": wall_elapsed, + "concurrency": concurrency, + "results": index, + } + summary_path = output_dir / "summary.json" + summary_path.write_text(json.dumps(summary, indent=2)) + logger.info( + "Done: completed=%d failed=%d wall=%.1fs summary=%s", + completed, + failed, + wall_elapsed, + summary_path, + ) + + +def cli() -> None: + """Typer entry point.""" + typer.run(main) + + +if __name__ == "__main__": + cli() diff --git a/prompt_bench/containers/__init__.py b/prompt_bench/containers/__init__.py new file mode 100644 index 0000000..dc58a44 --- /dev/null +++ b/prompt_bench/containers/__init__.py @@ -0,0 +1 @@ +"""Prompt benchmarking system for evaluating LLMs via vLLM.""" diff --git a/prompt_bench/containers/__pycache__/__init__.cpython-314.pyc b/prompt_bench/containers/__pycache__/__init__.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4bfe27b7766bf40b257d2c78dc7b9d16452be1d GIT binary patch literal 234 zcmdPqXP983%h4GcjHB@A&4L5!Y^9!w=n@*p;o zCvypN977P3Crb%S977PZC#wfr2^(0B#gpBGql5#@X7c1L;f!MlV)f+m;4a~oXNYAE zkbszuOe!+O@|rM0L`wLA*h=_=*h>V0I7$R#*%TRqIN@?aL0sYtLEPdDIRYiZK|HZS z`iw!m;tWB2AdwuV5|JQ&afToP5U)fuNKl+1NC?api{%W^gP4y@Dl!BK2Z@-&Y?g?X z2rx#LhppwL5d);61iC3ASH_k28I+a1_l|1SUE)o zn91@CK}^bEtf^dO=aQeCom!-joS#>cn3z)DmYQ6doRg}Mo0ylFo|>DQSE7)X zU!)M4SDceylA(~6nU|_tQks{Um#(MDc#AtRFE76&u_QA;uh>tM@fJ%!Vo8Q3<1J1{ zhCgUx>(r63#t^^*QiqD;7%a*e7*hB^K^??Zz!<|1lMiEr<)jQo0`BKV*o<(0 
z5RWp0Chsq0r_#)v6orzEROGa(07)YXnYoGSsd`m%U`3gE=_qQzl2!_;#kbf}z!~Ef zYZAz?Urf4bRZO}iw;1(8+*UH(Vkt?jD7nQ7PSsV4Zi$&WsVNF2`3hjgm?l>7dxC5Q zOP7F*_tWIK#g>zwo}OBCizPEJE&mpuo2Q>^NT{D{yr-|DyX!5k;?ksoqWt94;^JG( zMWuPS_+9*+yMfq2(!7$)+*H@1qWmIFCQx!MF3l;q z#h#m5T%4GmS`3O~1%)DB1_p*(++bJ6gZ)~h3`=Nf;AHcJg{$4W(ffjc!4|zMERJ`0 z#rxyB;%;yXPB5S0vP6G_$sV~A!WTGvZ}1A<;1mAB&cesm;P!!wfrIZlyTnCyi5ZSp z*kx}BnRc+>;ggzSewk1C0*CS)n4yYG?5?pG6@eoB7Dq-}d~zZv1VK3ngh9ywlWA>=`qNIfyBUC5S1AHHb~0MVUd9y$Gc57EelINn&woNqj+KNk$PU8Q$W`FD)r3 zEs0ObEGklAU|=W$Ws@RM3NGSiU|`T>ECQuaNG1k{4mjt536Sz)3$Qc3Gng~FGkye< z4}`=%u!6WB*cb#wKd^(D91Q%zA2`8GE(SKPkK7<8*q`t`3UZDhERRkA*TP{80Wg1q z{0_yia=L&aj1jLqV;I)F4b!g)2MOp27O=#CN=~q704TJd0el2B&QluUGBWMBv?&BG`<;Z>F%7nfsEda)Ikf`Wnqa&EIy@W?MN zQ2=E;P&tTR_Id^T`}x2$=z(=3GA6R#@{H7?RIvReMWFUbiUPO-S18ZS$x%p3RVYp@ zOHF|o04qO`wU^{8c$B86XXd55B_^jr^@2+o1&yT4ycFHs{L;JLPcJC7P0Y!u;?mX4EJ(J^$S*FbV$vlc=%=IMf4TE(Tgxrs%Y zRjEZ)GRO*{F{+nUoS&CtRg_=FsjCYWtCB!65Gn>ySH+{N3$Z~rC9|kX4M`nD7`w%C z!6k`BCFm6}Jf&3$C@4S!(h8K=REt3({@~IaETEuT>{bNIHdX3wh!!bQLpHS{6BN_s znI#ztph{8!U14rPPHIW2o`Nr^i3mwqptP%6?5D|li@hkdI6tQ>^%i?(aXhGDc}vhO zGbhzAzr-!SG%p2Q(cWUs%}&WIy2V>ul3x%HYs}r^bN2TOarE?a4T|@3^mQ$g2Gu*D zELtQ5Vu6~R;7Bi$2XPfZ)eu{e8i)-lvLV%!5~!kPPtMQHP0ULvQUtXGL5(vN6%_?o zHO^NI3P8BEMMew^4B$G;ixJeO{UF02qHtYU{i3k?Wns+*pBsYG*9Da?3MyX~RBiBj zz%72ATmB-q{B>@{OWcY#MATRCEU3F6WCp6ZdXfMHZ16AuC)i zu!vk?F}%Yf*y8_yU;H}1{6&8G%lwKB9yeHc+Wi{+W|*u{Y4E$kqW^)7fm84X2k#9I zfjb;xH#k&paERRC;JLvq+@WsjZ_5z0;m?z$$cR|AB0*5JBi|`E& zr5hl<1|O7#_}LoVzi2S9azk4tU{^q!DM|}`P`O@LSoEJjS}il&SEy`Ay~1nL;C6$Z z7rA{RV!gxdx{dEe8{Z4oeiwxNL7v8Jtyu1`y2j!Lsl`F{GzfzVT2O5WZli!=vVj46 z8wJ#PC<4`}1lt*LpvVO!45^c0^%PP1*@HNcOkhDbA&66%L6hs3R&Ysv zfkI*)sQAszFM|~Lp!@^vS0Zw;UX>h3Z2``50$hgPVgcpA5I0SxTYSlh1tq0Lsqv7q zrwBAOaf_)aw+K`h`)RTkVatdHpv(tq4}t?5(hk%IiGc`k{5XPx8qqoiCpx}%zb3y4 z9y8LGEl;Ri2#dbX5(74!ynd)AGq{NhZoXD= z6_*q#)F^y6lKnWI{v{r!qig6hl$h8R%jfy9xAu^5#ZG?|J( z&3r#iMsQCSoE^Z1K{7EY;TM63Qjq1K&PxNs7d8e~fi_>TswzY5<2IlM4t#Vb9>gn7 zErB+7%#lKK=}*QHG&MT1aaxJDl=$ugG+;3 
z>`<3gnZpO$qCvxNItnF~1*t`P{so{ms|F}$ZBbfD;P$w>W^56t#Zl!-gzj)i!#p3{ zP6B5ulm+*qFkdf^J2ZLZ3;|ERjQknj&?z;oWpZS33q&K7$Pp zV+dmmV+vwMRL()5fe_XzEl~drxx~xNE6z+wRY-)5f5BVWdR6+W8i~cppm8J3Vg*$V zaNvP@Az&VaVWps|0UblqEY@VY#R^sg9yHNpMr*YCX|mj62Muav=B3}_PD(7!Om@!C zOUq0z0(k}8KGkF?as?HipkxPW=(>S|9~5kAYHDhbObhA|fy>ltkQ^j$e34*a<+;ux zeu+hVM&<>B#H%bxcUXk3u!!7Y5xOBNb3<76hM?qkb{1A>M1YD55C#Pnv?w7w*h^eF zHUZ=+P?-X1uQ4%-fC?~?jubEjNd+KFVHj#T>t}-Hm?;AT1E?GWwK>qsF*8u0frx&P z<)Fk3DaVA`{2^9?{0hP#YoS(B%fHh=R>J(t#wh-gfsIidjZN0A0=5NbjB3ZV?lQe=o@3}S{&`eZN? zGC7O^VR8_t7GbF}2+qhaC!zhI=U1ifm#+YAHJ}Xn!gH8H8mOxXu3SMKP)(L1P)&b} z6>{$$c3;p!o_ z%)Amk&>RzUVnG2~7P-ZooRe7;&cMI`b_=MWY+!i8!rtZzw*MAKe0*+VW?p=}ru;4T z`1q9k5$jLcS23F?9rfzoMF0H|-S z1eu1?L!O4xgU5Wa9!M*w*t^AgE{g z6B6&?7Vqrn?BNP&(SW-3Mc^_4l6b%&QWOSqs3C{|g*v#jECQ7ekVFFt2K4aZu*uC& zDa}c>D@tNuU;w3qVkJffh7Zh)jEpxKT%Iy0J!as$%^-M}LEB6JfuKSI-wi?a2EH3YferjOg~cyOXj~T7yucvvm5rOx_=5~+-1`Fu zgP7WP@M!qg5=Kyu_A4J3qvi)G26mqI?56As;#ODKtQ(j=urV;Pwo5lkH_LuuVqz5f z%*?>V{{h7Mzy+6i=EblLsT)M>d8?##qK191fqEnIak4KCm&wFy=7caB%wu zs*IufLDjMvTtCQU5vU9kqX?48Uzpe!#i256jN)H~m>9V}a51p(bW~komb?i{Iv<%C KSfq+T(-i>z3b`); literal 0 HcmV?d00001 diff --git a/prompt_bench/containers/__pycache__/lib.cpython-314.pyc b/prompt_bench/containers/__pycache__/lib.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c8dd10d6dc2f8022156b1d1b5cdc84279cb1c32 GIT binary patch literal 1450 zcmdPqwZZ#3!ZZC1>a-=jW9qX6B_973=3@Cg~Ma-r|Xm zhq^R1KE8;Xfq{XOfq|h|4D7b=41A0!j32?|3>FCe8RUm#5COyN5D&3_j$;IcX$(x9 zApqojI1j>5WYA*>;_wG~k}-%Wh}oP4B3{4{#t;ByLqs5C9D_wU14D`-C~#ufAYu#* z0U$@hI8Zu>C5#~e9P>~CC>6j9Wgt-|P__xwh4KtRtjZu4@IfVzD17=07{eIVp=wj~ zh;SoY7$d??0{&r#r89`@KrYnetTGEvEXq^JOjB?V2-VFcCUX^6URh>JW}O!)GZE|s$PF{XSYGO)iktWA2uHw=psLET+MWuPSxF8uSIX@-!7F$k!dU|Tn zE%x%nB2YlxVk<65Ni8b6#adialvx1g=9iXeGJ&#TacNG;El#Lesl~;h)T5wK1WI(b z_`snUpI%TJpH`HbS|kn2N{!&G^n_QuKdvk87DvPlap@bpf;aetzi_hhu{F4T;9=n4 zyUs3gkzHbj;}v$zWqK*G$o4_j^K{q-0ZU~4?=bgxV 
zT|oJwfbwMl)dtV|>;iB-CL1!Za5%uZN!K`%Ke94#2o~`$Fff3^B0fH?6r2R&i$G=J zEq0LqGxO4m1Q{3@ZgHlkmiRydgd-jttnu+hA`A=+VDA)x%0sY85HEAsJq2a42#R0Dh+Jk*+69q*c1R8 CN>NGx literal 0 HcmV?d00001 diff --git a/prompt_bench/containers/finetune.py b/prompt_bench/containers/finetune.py new file mode 100644 index 0000000..cc20ae6 --- /dev/null +++ b/prompt_bench/containers/finetune.py @@ -0,0 +1,165 @@ +"""Docker container lifecycle management for Unsloth fine-tuning.""" + +from __future__ import annotations + +import logging +import subprocess +from pathlib import Path +from typing import Annotated + +import typer + +from python.prompt_bench.containers.lib import check_gpu_free + +logger = logging.getLogger(__name__) + +CONTAINER_NAME = "bill-finetune" +FINETUNE_IMAGE = "bill-finetune:latest" +DOCKERFILE_PATH = "/home/richie/dotfiles/python/prompt_bench/Dockerfile.finetune" +DEFAULT_HF_CACHE = Path("/zfs/models/hf") + + +def build_image() -> None: + """Build the fine-tuning Docker image.""" + logger.info("Building fine-tuning image: %s", FINETUNE_IMAGE) + result = subprocess.run( + ["docker", "build", "-f", DOCKERFILE_PATH, "-t", FINETUNE_IMAGE, "."], + text=True, + check=False, + ) + if result.returncode != 0: + message = "Failed to build fine-tuning image" + raise RuntimeError(message) + logger.info("Image built: %s", FINETUNE_IMAGE) + + +def start_finetune( + *, + dataset_path: Path, + output_dir: Path, + hf_cache: Path = DEFAULT_HF_CACHE, +) -> None: + """Run the fine-tuning container. + + Args: + dataset_path: Host path to the fine-tuning JSONL dataset. + output_dir: Host path where the trained model will be saved. + hf_cache: Host path to HuggingFace model cache (bind-mounted to avoid re-downloading). + validation_split: Fraction of data held out for validation. 
+ """ + dataset_path = dataset_path.resolve() + output_dir = output_dir.resolve() + + if not dataset_path.is_file(): + message = f"Dataset not found: {dataset_path}" + raise FileNotFoundError(message) + + output_dir.mkdir(parents=True, exist_ok=True) + stop_finetune() + + hf_cache = hf_cache.resolve() + hf_cache.mkdir(parents=True, exist_ok=True) + + command = [ + "docker", + "run", + "--name", + CONTAINER_NAME, + "--device=nvidia.com/gpu=all", + "--ipc=host", + "-v", + f"{hf_cache}:/root/.cache/huggingface", + "-v", + f"{output_dir}:/workspace/output/qwen-bill-summarizer", + "-v", + f"{dataset_path}:/workspace/dataset.jsonl:ro", + FINETUNE_IMAGE, + "--dataset", + "/workspace/dataset.jsonl", + "--output-dir", + "/workspace/output/qwen-bill-summarizer", + ] + + logger.info("Starting fine-tuning container") + logger.info(" Dataset: %s", dataset_path) + logger.info(" Output: %s", output_dir) + + result = subprocess.run(command, text=True, check=False) + if result.returncode != 0: + message = f"Fine-tuning container exited with code {result.returncode}" + raise RuntimeError(message) + logger.info("Fine-tuning complete. 
Model saved to %s", output_dir) + + +def stop_finetune() -> None: + """Stop and remove the fine-tuning container.""" + logger.info("Stopping fine-tuning container") + subprocess.run(["docker", "stop", CONTAINER_NAME], capture_output=True, check=False) + subprocess.run(["docker", "rm", "-f", CONTAINER_NAME], capture_output=True, check=False) + + +def logs_finetune() -> str | None: + """Return recent logs from the fine-tuning container, or None if not running.""" + result = subprocess.run( + ["docker", "logs", "--tail", "50", CONTAINER_NAME], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + return None + return result.stdout + result.stderr + + +app = typer.Typer(help="Fine-tuning container management.") + + +@app.command() +def build() -> None: + """Build the fine-tuning Docker image.""" + build_image() + + +@app.command() +def run( + dataset: Annotated[Path, typer.Option(help="Fine-tuning JSONL")] = Path( + "/home/richie/dotfiles/data/finetune_dataset.jsonl" + ), + output_dir: Annotated[Path, typer.Option(help="Where to save the trained model")] = Path( + "/home/richie/dotfiles/data/output/qwen-bill-summarizer", + ), + hf_cache: Annotated[Path, typer.Option(help="Host path to HuggingFace model cache")] = DEFAULT_HF_CACHE, + log_level: Annotated[str, typer.Option(help="Log level")] = "INFO", +) -> None: + """Run fine-tuning inside a Docker container.""" + logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s %(name)s: %(message)s") + check_gpu_free() + start_finetune( + dataset_path=dataset, + output_dir=output_dir, + hf_cache=hf_cache, + ) + +@app.command() +def stop() -> None: + """Stop and remove the fine-tuning container.""" + stop_finetune() + + +@app.command() +def logs() -> None: + """Show recent logs from the fine-tuning container.""" + output = logs_finetune() + if output is None: + typer.echo("No running fine-tuning container found.") + raise typer.Exit(code=1) + typer.echo(output) + + +def cli() -> 
None: + """Typer entry point.""" + app() + + +if __name__ == "__main__": + cli() diff --git a/prompt_bench/containers/lib.py b/prompt_bench/containers/lib.py new file mode 100644 index 0000000..b1ba5c4 --- /dev/null +++ b/prompt_bench/containers/lib.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +import logging +import subprocess + +logger = logging.getLogger(__name__) + + +def check_gpu_free() -> None: + """Warn if GPU-heavy processes (e.g. Ollama) are running.""" + result = subprocess.run( + ["nvidia-smi", "--query-compute-apps=pid,process_name", "--format=csv,noheader"], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + logger.warning("Could not query GPU processes: %s", result.stderr.strip()) + return + processes = result.stdout.strip() + if processes: + logger.warning("GPU processes detected:\n%s", processes) + logger.warning("Consider stopping Ollama (sudo systemctl stop ollama) before benchmarking") diff --git a/prompt_bench/containers/vllm.py b/prompt_bench/containers/vllm.py new file mode 100644 index 0000000..33e7e31 --- /dev/null +++ b/prompt_bench/containers/vllm.py @@ -0,0 +1,70 @@ +"""Docker container lifecycle management for vLLM.""" + +from __future__ import annotations + +import logging +import subprocess + +logger = logging.getLogger(__name__) + +CONTAINER_NAME = "vllm-bench" +VLLM_IMAGE = "vllm/vllm-openai:v0.19.0" + + +def start_vllm( + *, + model: str, + port: int, + model_dir: str, + gpu_memory_utilization: float, +) -> None: + """Start a vLLM container serving the given model. + + Args: + model: HuggingFace model directory name (relative to model_dir). + port: Host port to bind. + model_dir: Host path containing HuggingFace model directories. + gpu_memory_utilization: Fraction of GPU memory to use (0-1). 
+ """ + command = [ + "docker", + "run", + "-d", + "--name", + CONTAINER_NAME, + "--device=nvidia.com/gpu=all", + "--ipc=host", + "-v", + f"{model_dir}:/models", + "-p", + f"{port}:8000", + VLLM_IMAGE, + "--model", + f"/models/{model}", + "--served-model-name", + model, + "--gpu-memory-utilization", + str(gpu_memory_utilization), + "--max-model-len", + "4096", + ] + logger.info("Starting vLLM container with model: %s", model) + stop_vllm() + result = subprocess.run(command, capture_output=True, text=True, check=False) + if result.returncode != 0: + msg = f"Failed to start vLLM container: {result.stderr.strip()}" + raise RuntimeError(msg) + logger.info("vLLM container started: %s", result.stdout.strip()[:12]) + + +def stop_vllm() -> None: + """Stop and remove the vLLM benchmark container.""" + logger.info("Stopping vLLM container") + subprocess.run(["docker", "stop", CONTAINER_NAME], capture_output=True, check=False) + subprocess.run(["docker", "rm", "-f", CONTAINER_NAME], capture_output=True, check=False) + subprocess.run( + ["docker", "network", "disconnect", "-f", "bridge", CONTAINER_NAME], + capture_output=True, + check=False, + ) + logger.info("vLLM container stopped and removed") diff --git a/prompt_bench/downloader.py b/prompt_bench/downloader.py new file mode 100644 index 0000000..8710b9e --- /dev/null +++ b/prompt_bench/downloader.py @@ -0,0 +1,75 @@ +"""HuggingFace model downloader.""" + +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Annotated + +import typer +from huggingface_hub import snapshot_download + +from python.prompt_bench.models import BenchmarkConfig + +logger = logging.getLogger(__name__) + + +def local_model_path(repo: str, model_dir: str) -> Path: + """Return the local directory path for a HuggingFace repo.""" + return Path(model_dir) / repo + + +def is_model_present(repo: str, model_dir: str) -> bool: + """Check if a model has already been downloaded.""" + path = local_model_path(repo, 
model_dir) + return path.exists() and any(path.iterdir()) + + +def download_model(repo: str, model_dir: str) -> Path: + """Download a HuggingFace model to the local model directory. + + Skips the download if the model directory already exists and contains files. + """ + local_path = local_model_path(repo, model_dir) + + if is_model_present(repo, model_dir): + logger.info("Model already exists: %s", local_path) + return local_path + + logger.info("Downloading model: %s -> %s", repo, local_path) + snapshot_download( + repo_id=repo, + local_dir=str(local_path), + ) + logger.info("Download complete: %s", repo) + return local_path + + +def download_all(config: BenchmarkConfig) -> None: + """Download every model listed in the config, top to bottom.""" + for repo in config.models: + download_model(repo, config.model_dir) + + +def main( + config: Annotated[Path, typer.Option(help="Path to TOML config file")] = Path("bench.toml"), + log_level: Annotated[str, typer.Option(help="Log level")] = "INFO", +) -> None: + """Download all models listed in the benchmark config.""" + logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s %(name)s: %(message)s") + + if not config.is_file(): + message = f"Config file does not exist: {config}" + raise typer.BadParameter(message) + + benchmark_config = BenchmarkConfig.from_toml(config) + download_all(benchmark_config) + + +def cli() -> None: + """Typer entry point.""" + typer.run(main) + + +if __name__ == "__main__": + cli() diff --git a/prompt_bench/finetune.py b/prompt_bench/finetune.py new file mode 100644 index 0000000..3bcea4a --- /dev/null +++ b/prompt_bench/finetune.py @@ -0,0 +1,214 @@ +"""Fine-tune Qwen 3.5 4B on bill summarization data using Unsloth. + +Loads a ChatML-style JSONL dataset (system/user/assistant messages), +applies QLoRA with 4-bit quantization, and saves the merged model +in HuggingFace format. Designed for a single RTX 3090 (24GB). 
+ +Usage: + python -m python.prompt_bench.finetune \ + --dataset output/finetune_dataset.jsonl \ + --output-dir output/qwen-bill-summarizer +""" + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass +from pathlib import Path +from typing import Annotated + +import tomllib +import typer +from unsloth import FastLanguageModel +from datasets import Dataset +from transformers import TrainingArguments +from trl import SFTTrainer + +logger = logging.getLogger(__name__) + + +@dataclass +class LoraConfig: + """LoRA adapter hyperparameters.""" + + rank: int + alpha: int + dropout: float + targets: list[str] + + +@dataclass +class TrainingConfig: + """Training loop hyperparameters.""" + + learning_rate: float + epochs: int + batch_size: int + gradient_accumulation: int + max_seq_length: int + warmup_ratio: float + weight_decay: float + logging_steps: int + save_steps: int + + +@dataclass +class FinetuneConfig: + """Top-level finetune configuration.""" + + base_model: str + lora: LoraConfig + training: TrainingConfig + + @classmethod + def from_toml(cls, config_path: Path) -> FinetuneConfig: + """Load finetune config from a TOML file.""" + raw = tomllib.loads(config_path.read_text())["finetune"] + return cls( + base_model=raw["base_model"], + lora=LoraConfig(**raw["lora"]), + training=TrainingConfig(**raw["training"]), + ) + + +def _messages_to_chatml(messages: list[dict]) -> str: + r"""Convert a message list to Qwen ChatML format. + + Produces: + <|im_start|>system\n...\n<|im_end|> + <|im_start|>user\n...\n<|im_end|> + <|im_start|>assistant\n...\n<|im_end|> + """ + parts = [] + for message in messages: + role = message["role"] + content = message["content"] + parts.append(f"<|im_start|>{role}\n{content}<|im_end|>") + return "\n".join(parts) + + +def load_dataset_from_jsonl(path: Path) -> Dataset: + """Load a ChatML JSONL file into a HuggingFace Dataset. 
+ + Each line must have {"messages": [{"role": ..., "content": ...}, ...]}. + Pre-formats into a `text` column with the Qwen ChatML template applied, + which SFTTrainer consumes directly. + """ + records = [] + with path.open(encoding="utf-8") as handle: + for raw_line in handle: + stripped = raw_line.strip() + if stripped: + entry = json.loads(stripped) + records.append({"text": _messages_to_chatml(entry["messages"])}) + logger.info("Loaded %d examples from %s", len(records), path) + return Dataset.from_list(records) + + +def main( + dataset_path: Annotated[Path, typer.Option("--dataset", help="Fine-tuning JSONL")] = Path( + "output/finetune_dataset.jsonl", + ), + validation_split: Annotated[float, typer.Option("--val-split", help="Fraction held out for validation")] = 0.1, + output_dir: Annotated[Path, typer.Option("--output-dir", help="Where to save the merged model")] = Path( + "output/qwen-bill-summarizer", + ), + config_path: Annotated[ + Path, + typer.Option("--config", help="TOML config file"), + ] = Path(__file__).parent / "config.toml", + save_gguf: Annotated[bool, typer.Option("--save-gguf/--no-save-gguf", help="Also save GGUF")] = False, +) -> None: + """Fine-tune Qwen 3.5 4B on bill summarization with Unsloth + QLoRA.""" + logging.basicConfig(level="INFO", format="%(asctime)s %(levelname)s %(name)s: %(message)s") + + if not dataset_path.is_file(): + message = f"Dataset not found: {dataset_path}" + raise typer.BadParameter(message) + + config = FinetuneConfig.from_toml(config_path) + + logger.info("Loading base model: %s", config.base_model) + model, tokenizer = FastLanguageModel.from_pretrained( + model_name=config.base_model, + max_seq_length=config.training.max_seq_length, + load_in_4bit=True, + dtype=None, + ) + + logger.info("Applying LoRA (rank=%d, alpha=%d)", config.lora.rank, config.lora.alpha) + model = FastLanguageModel.get_peft_model( + model, + r=config.lora.rank, + lora_alpha=config.lora.alpha, + lora_dropout=config.lora.dropout, + 
target_modules=config.lora.targets, + bias="none", + use_gradient_checkpointing="unsloth", + random_state=42, + ) + + full_dataset = load_dataset_from_jsonl(dataset_path) + split = full_dataset.train_test_split(test_size=validation_split, seed=42) + train_dataset = split["train"] + validation_dataset = split["test"] + logger.info("Split: %d train, %d validation", len(train_dataset), len(validation_dataset)) + training_args = TrainingArguments( + output_dir=str(output_dir / "checkpoints"), + num_train_epochs=config.training.epochs, + per_device_train_batch_size=config.training.batch_size, + gradient_accumulation_steps=config.training.gradient_accumulation, + learning_rate=config.training.learning_rate, + warmup_ratio=config.training.warmup_ratio, + weight_decay=config.training.weight_decay, + lr_scheduler_type="cosine", + logging_steps=config.training.logging_steps, + save_steps=config.training.save_steps, + save_total_limit=3, + eval_strategy="steps", + eval_steps=config.training.save_steps, + load_best_model_at_end=True, + bf16=True, + optim="adamw_8bit", + seed=42, + report_to="none", + ) + + trainer = SFTTrainer( + model=model, + tokenizer=tokenizer, + train_dataset=train_dataset, + eval_dataset=validation_dataset, + args=training_args, + max_seq_length=config.training.max_seq_length, + packing=True, + ) + + logger.info( + "Starting training: %d train, %d val, %d epochs", + len(train_dataset), + len(validation_dataset), + config.training.epochs, + ) + trainer.train() + + merged_path = str(output_dir / "merged") + logger.info("Saving merged model to %s", merged_path) + model.save_pretrained_merged(merged_path, tokenizer, save_method="merged_16bit") + + if save_gguf: + gguf_path = str(output_dir / "gguf") + logger.info("Saving GGUF to %s", gguf_path) + model.save_pretrained_gguf(gguf_path, tokenizer, quantization_method="q4_k_m") + + logger.info("Done! 
Model saved to %s", output_dir) + + +def cli() -> None: + """Typer entry point.""" + typer.run(main) + + +if __name__ == "__main__": + cli() diff --git a/prompt_bench/input/1.txt b/prompt_bench/input/1.txt new file mode 100644 index 0000000..ca816cb --- /dev/null +++ b/prompt_bench/input/1.txt @@ -0,0 +1 @@ +how many oceans are there in the world \ No newline at end of file diff --git a/prompt_bench/input/2.txt b/prompt_bench/input/2.txt new file mode 100644 index 0000000..579812e --- /dev/null +++ b/prompt_bench/input/2.txt @@ -0,0 +1 @@ +whos the president of the united states \ No newline at end of file diff --git a/prompt_bench/input/3.txt b/prompt_bench/input/3.txt new file mode 100644 index 0000000..88f52f8 --- /dev/null +++ b/prompt_bench/input/3.txt @@ -0,0 +1 @@ +whats the greatest country in the world \ No newline at end of file diff --git a/prompt_bench/input/4.txt b/prompt_bench/input/4.txt new file mode 100644 index 0000000..bc2cd10 --- /dev/null +++ b/prompt_bench/input/4.txt @@ -0,0 +1 @@ +was/is the usa the greatest country in the world \ No newline at end of file diff --git a/prompt_bench/main.py b/prompt_bench/main.py new file mode 100644 index 0000000..2d6a725 --- /dev/null +++ b/prompt_bench/main.py @@ -0,0 +1,215 @@ +"""CLI entry point for the prompt benchmarking system.""" + +from __future__ import annotations + +import json +import logging +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path +from typing import Annotated + +import typer + +from python.prompt_bench.containers.lib import check_gpu_free +from python.prompt_bench.containers.vllm import start_vllm, stop_vllm +from python.prompt_bench.downloader import is_model_present +from python.prompt_bench.models import BenchmarkConfig +from python.prompt_bench.vllm_client import VLLMClient + +logger = logging.getLogger(__name__) + + +def discover_prompts(input_dir: Path) -> list[Path]: + """Find all .txt files in the input directory.""" + 
 prompts = list(input_dir.glob("*.txt")) + if not prompts: + message = f"No .txt files found in {input_dir}" + raise FileNotFoundError(message) + return prompts + + +def _run_prompt( + client: VLLMClient, + prompt_path: Path, + *, + repo: str, + model_dir_name: str, + model_output: Path, + temperature: float, +) -> tuple[bool, float]: + """Run a single prompt. Returns (success, elapsed_seconds).""" + filename = prompt_path.name + output_path = model_output / filename + start = time.monotonic() + try: + prompt_text = prompt_path.read_text() + response = client.complete(prompt_text, model_dir_name, temperature=temperature) + output_path.write_text(response) + elapsed = time.monotonic() - start + logger.info("Completed: %s / %s in %.2fs", repo, filename, elapsed) + except Exception: + elapsed = time.monotonic() - start + error_path = model_output / f"{filename}.error" + logger.exception("Failed: %s / %s after %.2fs", repo, filename, elapsed) + error_path.write_text(f"Error processing {filename}") + return False, elapsed + return True, elapsed + + +def benchmark_model( + client: VLLMClient, + prompts: list[Path], + *, + repo: str, + model_dir_name: str, + model_output: Path, + temperature: float, + concurrency: int, +) -> tuple[int, int]: + """Run all prompts against a single model in parallel. + + vLLM batches concurrent requests internally, so submitting many at once is + significantly faster than running them serially. 
+ """ + pending = [prompt for prompt in prompts if not (model_output / prompt.name).exists()] + skipped = len(prompts) - len(pending) + if skipped: + logger.info("Skipping %d prompts with existing output for %s", skipped, repo) + + if not pending: + logger.info("Nothing to do for %s", repo) + return 0, 0 + + completed = 0 + failed = 0 + latencies: list[float] = [] + + wall_start = time.monotonic() + with ThreadPoolExecutor(max_workers=concurrency) as executor: + futures = [ + executor.submit( + _run_prompt, + client, + prompt_path, + repo=repo, + model_dir_name=model_dir_name, + model_output=model_output, + temperature=temperature, + ) + for prompt_path in pending + ] + for future in as_completed(futures): + success, elapsed = future.result() + latencies.append(elapsed) + if success: + completed += 1 + else: + failed += 1 + wall_elapsed = time.monotonic() - wall_start + + attempted = completed + failed + avg_latency = sum(latencies) / attempted + throughput = attempted / wall_elapsed if wall_elapsed > 0 else 0.0 + timing = { + "repo": repo, + "wall_seconds": wall_elapsed, + "attempted": attempted, + "completed": completed, + "failed": failed, + "avg_latency_seconds": avg_latency, + "throughput_prompts_per_second": throughput, + "concurrency": concurrency, + } + timing_path = model_output / "_timing.json" + timing_path.write_text(json.dumps(timing, indent=2)) + + return completed, failed + + +def run_benchmark( + config: BenchmarkConfig, + input_dir: Path, + output_dir: Path, +) -> None: + """Execute the benchmark across all models and prompts.""" + prompts = discover_prompts(input_dir) + logger.info("Found %d prompts in %s", len(prompts), input_dir) + + check_gpu_free() + + total_completed = 0 + total_failed = 0 + + for repo in config.models: + if not is_model_present(repo, config.model_dir): + logger.warning("Skipping (not downloaded): %s", repo) + continue + + model_output = output_dir / repo + model_output.mkdir(parents=True, exist_ok=True) + + logger.info("=== 
Benchmarking model: %s ===", repo) + + stop_vllm() + try: + start_vllm( + model=repo, + port=config.port, + model_dir=config.model_dir, + gpu_memory_utilization=config.gpu_memory_utilization, + ) + except RuntimeError: + logger.exception("Failed to start vLLM for %s, skipping", repo) + continue + logger.info("vLLM started for %s", repo) + try: + with VLLMClient(port=config.port, timeout=config.timeout) as client: + client.wait_ready(max_wait=config.vllm_startup_timeout) + completed, failed = benchmark_model( + client, + prompts, + repo=repo, + model_dir_name=repo, + model_output=model_output, + temperature=config.temperature, + concurrency=config.concurrency, + ) + total_completed += completed + total_failed += failed + finally: + stop_vllm() + + logger.info("=== Benchmark complete ===") + logger.info("Completed: %d | Failed: %d", total_completed, total_failed) + + +def main( + input_dir: Annotated[Path, typer.Argument(help="Directory containing input .txt prompt files")], + config: Annotated[Path, typer.Option(help="Path to TOML config file")] = Path("bench.toml"), + output_dir: Annotated[Path, typer.Option(help="Output directory for results")] = Path("output"), + log_level: Annotated[str, typer.Option(help="Log level")] = "INFO", +) -> None: + """Run prompts through multiple LLMs via vLLM and save results.""" + logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s %(name)s: %(message)s") + + if not input_dir.is_dir(): + message = f"Input directory does not exist: {input_dir}" + raise typer.BadParameter(message) + if not config.is_file(): + message = f"Config file does not exist: {config}" + raise typer.BadParameter(message) + + benchmark_config = BenchmarkConfig.from_toml(config) + output_dir.mkdir(parents=True, exist_ok=True) + + run_benchmark(benchmark_config, input_dir, output_dir) + + +def cli() -> None: + """Typer entry point.""" + typer.run(main) + + +if __name__ == "__main__": + cli() diff --git a/prompt_bench/models.py 
b/prompt_bench/models.py new file mode 100644 index 0000000..c722aba --- /dev/null +++ b/prompt_bench/models.py @@ -0,0 +1,30 @@ +"""Pydantic models for benchmark configuration.""" + +from __future__ import annotations + +import tomllib +from typing import TYPE_CHECKING + +from pydantic import BaseModel + +if TYPE_CHECKING: + from pathlib import Path + + +class BenchmarkConfig(BaseModel): + """Top-level benchmark configuration loaded from TOML.""" + + models: list[str] + model_dir: str = "/zfs/models/hf" + port: int = 8000 + gpu_memory_utilization: float = 0.90 + temperature: float = 0.0 + timeout: int = 300 + concurrency: int = 4 + vllm_startup_timeout: int = 900 + + @classmethod + def from_toml(cls, config_path: Path) -> BenchmarkConfig: + """Load benchmark config from a TOML file.""" + raw = tomllib.loads(config_path.read_text())["bench"] + return cls(**raw) diff --git a/prompt_bench/summarization_prompts.py b/prompt_bench/summarization_prompts.py new file mode 100644 index 0000000..bfdd5a5 --- /dev/null +++ b/prompt_bench/summarization_prompts.py @@ -0,0 +1,34 @@ +SUMMARIZATION_SYSTEM_PROMPT = """You are a legislative analyst extracting policy substance from Congressional bill text. + +Your job is to compress a bill into a dense, neutral structured summary that captures every distinct policy action — including secondary effects that might be buried in subsections. + +EXTRACTION RULES: +- IGNORE: whereas clauses, congressional findings that are purely political statements, recitals, preambles, citations of existing law by number alone, and procedural boilerplate. +- FOCUS ON: operative verbs — what the bill SHALL do, PROHIBIT, REQUIRE, AUTHORIZE, AMEND, APPROPRIATE, or ESTABLISH. +- SURFACE ALL THREADS: If the bill touches multiple policy areas, list each thread separately. Do not collapse them. +- BE CONCRETE: Name the affected population, the mechanism, and the direction (expands/restricts/maintains). +- STAY NEUTRAL: No political framing. 
Describe what the text does, not what its sponsors claim it does. + +OUTPUT FORMAT — plain structured text, not JSON: + +OPERATIVE ACTIONS: +[Numbered list of what the bill actually does, one action per line, max 20 words each] + +AFFECTED POPULATIONS: +[Who gains something, who loses something, or whose behavior is regulated] + +MECHANISMS: +[How it works: new funding, mandate, prohibition, amendment to existing statute, grant program, study commission, etc.] + +POLICY THREADS: +[List each distinct policy domain this bill touches, even minor ones. Use plain language, not domain codes.] + +SYMBOLIC/PROCEDURAL ONLY: +[Yes or No — is this bill primarily a resolution, designation, or awareness declaration with no operative effect?] + +LENGTH TARGET: 150-250 words total. Be ruthless about cutting. Density over completeness.""" + +SUMMARIZATION_USER_TEMPLATE = """Summarize the following Congressional bill according to your instructions. + +BILL TEXT: +{text_content}""" diff --git a/prompt_bench/tools/build_finetune_dataset.py b/prompt_bench/tools/build_finetune_dataset.py new file mode 100644 index 0000000..e3594b8 --- /dev/null +++ b/prompt_bench/tools/build_finetune_dataset.py @@ -0,0 +1,114 @@ +"""Build a fine-tuning JSONL dataset from batch request + output files. + +Joins the original request JSONL (system + user messages) with the batch +output JSONL (assistant completions) by custom_id to produce a ChatML-style +messages JSONL suitable for fine-tuning. 
+""" + +from __future__ import annotations + +import json +import logging +from pathlib import Path +from typing import Annotated + +import typer + +logger = logging.getLogger(__name__) + +HTTP_OK = 200 + + +def load_requests(path: Path) -> dict[str, list[dict]]: + """Parse request JSONL into {custom_id: messages}.""" + results: dict[str, list[dict]] = {} + with path.open(encoding="utf-8") as handle: + for raw_line in handle: + stripped = raw_line.strip() + if not stripped: + continue + record = json.loads(stripped) + custom_id = record["custom_id"] + messages = record["body"]["messages"] + results[custom_id] = messages + return results + + +def load_completions(path: Path) -> dict[str, str]: + """Parse batch output JSONL into {custom_id: assistant_content}.""" + results: dict[str, str] = {} + with path.open(encoding="utf-8") as handle: + for line_number, raw_line in enumerate(handle, 1): + stripped = raw_line.strip() + if not stripped: + continue + record = json.loads(stripped) + custom_id = record["custom_id"] + response = record.get("response", {}) + if response.get("status_code") != HTTP_OK: + logger.warning("Skipping %s (line %d): status %s", custom_id, line_number, response.get("status_code")) + continue + body = response.get("body", {}) + choices = body.get("choices", []) + if not choices: + logger.warning("Skipping %s (line %d): no choices", custom_id, line_number) + continue + content = choices[0].get("message", {}).get("content", "") + if not content: + logger.warning("Skipping %s (line %d): empty content", custom_id, line_number) + continue + results[custom_id] = content + return results + + +def main( + requests_path: Annotated[Path, typer.Option("--requests", help="Batch request JSONL")] = Path( + "output/openai_batch/requests.jsonl", + ), + batch_output: Annotated[Path, typer.Option("--batch-output", help="Batch output JSONL")] = Path( + "batch_69d84558d91c819091d53f08d78f9fd6_output.jsonl", + ), + output_path: Annotated[Path, typer.Option("--output", 
help="Fine-tuning JSONL output")] = Path( + "output/finetune_dataset.jsonl", + ), + log_level: Annotated[str, typer.Option(help="Log level")] = "INFO", +) -> None: + """Build fine-tuning dataset by joining request and output JSONL files.""" + logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s %(name)s: %(message)s") + + logger.info("Loading requests from %s", requests_path) + requests = load_requests(requests_path) + logger.info("Loaded %d requests", len(requests)) + + logger.info("Loading completions from %s", batch_output) + completions = load_completions(batch_output) + logger.info("Loaded %d completions", len(completions)) + + output_path.parent.mkdir(parents=True, exist_ok=True) + matched = 0 + skipped = 0 + + with output_path.open("w", encoding="utf-8") as handle: + for custom_id, messages in requests.items(): + assistant_content = completions.get(custom_id) + if assistant_content is None: + skipped += 1 + continue + + example = { + "messages": [*messages, {"role": "assistant", "content": assistant_content}], + } + handle.write(json.dumps(example, ensure_ascii=False)) + handle.write("\n") + matched += 1 + + logger.info("Wrote %d examples to %s (skipped %d unmatched)", matched, output_path, skipped) + + +def cli() -> None: + """Typer entry point.""" + typer.run(main) + + +if __name__ == "__main__": + cli() diff --git a/prompt_bench/tools/count_tokens.py b/prompt_bench/tools/count_tokens.py new file mode 100644 index 0000000..fdc05de --- /dev/null +++ b/prompt_bench/tools/count_tokens.py @@ -0,0 +1,97 @@ +"""Sum token usage across compressed and uncompressed run directories.""" + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass, field +from pathlib import Path +from typing import Annotated + +import typer + +logger = logging.getLogger(__name__) + + +@dataclass +class UsageTotals: + """Aggregate usage counters for a directory of run records.""" + + files: int = 0 + errors: int = 0 + 
prompt_tokens: int = 0 + cached_tokens: int = 0 + completion_tokens: int = 0 + reasoning_tokens: int = 0 + total_tokens: int = 0 + per_file: list[tuple[str, int, int, int]] = field(default_factory=list) + + +def tally_directory(directory: Path) -> UsageTotals: + """Return aggregated usage stats for every JSON record in a directory.""" + totals = UsageTotals() + decoder = json.JSONDecoder() + for path in sorted(directory.glob("*.json")): + text = path.read_text().lstrip() + record, _ = decoder.raw_decode(text) + totals.files += 1 + usage = record.get("usage") + if not usage: + totals.errors += 1 + continue + prompt_tokens = usage.get("prompt_tokens", 0) + completion_tokens = usage.get("completion_tokens", 0) + total_tokens = usage.get("total_tokens", 0) + cached_tokens = (usage.get("prompt_tokens_details") or {}).get("cached_tokens", 0) + reasoning_tokens = (usage.get("completion_tokens_details") or {}).get("reasoning_tokens", 0) + totals.prompt_tokens += prompt_tokens + totals.completion_tokens += completion_tokens + totals.total_tokens += total_tokens + totals.cached_tokens += cached_tokens + totals.reasoning_tokens += reasoning_tokens + totals.per_file.append((path.name, prompt_tokens, completion_tokens, total_tokens)) + return totals + + +def log_totals(label: str, totals: UsageTotals) -> None: + """Log a one-block summary for a directory.""" + counted = totals.files - totals.errors + average_total = totals.total_tokens / counted if counted else 0 + logger.info("[%s]", label) + logger.info(" files : %d (with usage: %d, errors: %d)", totals.files, counted, totals.errors) + logger.info(" prompt tokens : %d", totals.prompt_tokens) + logger.info(" cached tokens : %d", totals.cached_tokens) + logger.info(" completion tok : %d", totals.completion_tokens) + logger.info(" reasoning tok : %d", totals.reasoning_tokens) + logger.info(" total tokens : %d", totals.total_tokens) + logger.info(" avg total/file : %.1f", average_total) + + +def main( + runs_dir: Annotated[Path, 
typer.Option("--runs-dir")] = Path("output/openai_runs_temp_1"), + log_level: Annotated[str, typer.Option("--log-level")] = "INFO", +) -> None: + """Print token usage totals for the compressed and uncompressed run directories.""" + logging.basicConfig(level=log_level, format="%(message)s") + + grand = UsageTotals() + for label in ("compressed", "uncompressed"): + directory = runs_dir / label + if not directory.is_dir(): + logger.warning("%s: directory not found at %s", label, directory) + continue + totals = tally_directory(directory) + log_totals(label, totals) + grand.files += totals.files + grand.errors += totals.errors + grand.prompt_tokens += totals.prompt_tokens + grand.cached_tokens += totals.cached_tokens + grand.completion_tokens += totals.completion_tokens + grand.reasoning_tokens += totals.reasoning_tokens + grand.total_tokens += totals.total_tokens + + log_totals("grand total", grand) + + +if __name__ == "__main__": + typer.run(main) diff --git a/prompt_bench/vllm_client.py b/prompt_bench/vllm_client.py new file mode 100644 index 0000000..b7d9045 --- /dev/null +++ b/prompt_bench/vllm_client.py @@ -0,0 +1,68 @@ +"""OpenAI-compatible client for vLLM's API.""" + +from __future__ import annotations + +import logging +import time +from typing import Self + +import httpx + +logger = logging.getLogger(__name__) + +READY_POLL_INTERVAL = 2.0 + + +class VLLMClient: + """Talk to a vLLM server via its OpenAI-compatible API. + + Args: + host: vLLM host. + port: vLLM port. + timeout: Per-request timeout in seconds. 
+ """ + + def __init__(self, *, host: str = "localhost", port: int = 8000, timeout: int = 300) -> None: + """Create a client connected to a vLLM server.""" + self._client = httpx.Client(base_url=f"http://{host}:{port}", timeout=timeout) + + def wait_ready(self, max_wait: int) -> None: + """Poll /v1/models until the server is ready or timeout.""" + deadline = time.monotonic() + max_wait + while time.monotonic() < deadline: + try: + response = self._client.get("/v1/models") + if response.is_success: + logger.info("vLLM server is ready") + return + except httpx.TransportError: + pass + time.sleep(READY_POLL_INTERVAL) + msg = f"vLLM server not ready after {max_wait}s" + raise TimeoutError(msg) + + def complete(self, prompt: str, model: str, *, temperature: float = 0.0, max_tokens: int = 4096) -> str: + """Send a prompt to /v1/completions and return the response text.""" + payload = { + "model": model, + "prompt": prompt, + "temperature": temperature, + "max_tokens": max_tokens, + } + logger.info("Sending prompt to %s (%d chars)", model, len(prompt)) + response = self._client.post("/v1/completions", json=payload) + response.raise_for_status() + data = response.json() + return data["choices"][0]["text"] + + def close(self) -> None: + """Close the HTTP client.""" + self._client.close() + + def __enter__(self) -> Self: + """Enter the context manager.""" + return self + + def __exit__(self, *args: object) -> None: + """Close the HTTP client on exit.""" + self.close() diff --git a/pyprject.toml b/pyprject.toml new file mode 100644 index 0000000..e69de29