mirror of
https://github.com/frankwxu/mobile-pii-discovery-agent.git
synced 2026-02-20 13:40:41 +00:00
update RQs
This commit is contained in:
232
RQs/RQ4/RQ4_t11.ipynb
Normal file
232
RQs/RQ4/RQ4_t11.ipynb
Normal file
@@ -0,0 +1,232 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ebe9dbb1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\\begin{table*}\n",
|
||||
"\\centering\n",
|
||||
"\\small\n",
|
||||
"\\caption{Distribution of distinct PII entities discovered across evaluated methods and PII categories.}\n",
|
||||
"\\label{tab:model_yield}\n",
|
||||
"\\begin{tabular}{|l|l|l|l|l|l|l|l|}\n",
|
||||
"\\hline\n",
|
||||
"\\multicolumn{2}{|c|}{\\textbf{Method/LLM}} & \\textbf{Email} & \\textbf{Phone} & \\textbf{User Name} & \\textbf{Real Name} & \\textbf{Precision} & \\textbf{Recall} \\\\\n",
|
||||
"\\hline\n",
|
||||
"Gemini-2.5-Pro & 88.4\\% & 10 & 791 & 1664 & 1076 & 76.50\\% & 49.03\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-3.5-Turbo & xx & 4 & 13 & 0 & 276 & 24.23\\% & 1.29\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-4.1 & xx & 1289 & 680 & 531 & 683 & 33.30\\% & 19.19\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-4o-mini & 82.0\\% & 14 & 22 & 875 & 1928 & 39.98\\% & 20.54\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-5.1 & xx & 16 & 1184 & 1234 & 2154 & 51.20\\% & 42.52\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"LLaMA-3.1-8B-Instruct & xx & 0 & 0 & 2 & 15 & 88.24\\% & 0.27\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"LLaMA-3.1-70B-Instruct & xx & 6 & 6 & 34 & 15 & 39.34\\% & 0.43\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Mistral-Large & xx & 2 & 989 & 2121 & 1 & 59.56\\% & 33.56\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Mixtral-8x7B & xx & 6 & 6 & 2302 & 98 & 48.34\\% & 21.10\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Mixtral-8x22B & xx & 0 & 6 & 58 & 15 & 25.32\\% & 0.36\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Qwen2.5-72B & 82.3\\% & 961 & 45 & 528 & 1203 & 38.87\\% & 19.26\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"\\end{tabular}\n",
|
||||
"\\end{table*}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import re\n",
|
||||
"from pathlib import Path\n",
|
||||
"from typing import Dict, Set\n",
|
||||
"\n",
|
||||
"# =========================\n",
|
||||
"# CONFIG\n",
|
||||
"# =========================\n",
|
||||
"\n",
|
||||
"BASE_DIR = Path(r\"C:\\Users\\cyfij\\OneDrive\\Desktop\\DFRWS 2026\\Agent\\RQs\\normalized_results\")\n",
|
||||
"\n",
|
||||
"GT_PATH = BASE_DIR / \"ground_truth\" / \"corpus_level\" / \"corpus_level.jsonl\"\n",
|
||||
"\n",
|
||||
"METHODS = {\n",
|
||||
" \"Gemini-2.5-Pro\": {\"path\": \"gemini_2_5_pro\", \"bm\": \"88.4\\\\%\"},\n",
|
||||
" \"GPT-3.5-Turbo\": {\"path\": \"gpt_3_5_turbo\", \"bm\": \"xx\"},\n",
|
||||
" \"GPT-4.1\": {\"path\": \"gpt_4_1\", \"bm\": \"xx\"},\n",
|
||||
" \"GPT-4o-mini\": {\"path\": \"gpt_4o_mini\", \"bm\": \"82.0\\\\%\"},\n",
|
||||
" \"GPT-5.1\": {\"path\": \"gpt_5_1\", \"bm\": \"xx\"},\n",
|
||||
" \"LLaMA-3.1-8B-Instruct\": {\"path\": \"llama_3_1_8b\", \"bm\": \"xx\"},\n",
|
||||
" \"LLaMA-3.1-70B-Instruct\": {\"path\": \"llama_3_1_70b\", \"bm\": \"xx\"},\n",
|
||||
" \"Mistral-Large\": {\"path\": \"mistral_large\", \"bm\": \"xx\"},\n",
|
||||
" \"Mixtral-8x7B\": {\"path\": \"mixtral_8x7b\", \"bm\": \"xx\"},\n",
|
||||
" \"Mixtral-8x22B\": {\"path\": \"mixtral_8x22b\", \"bm\": \"xx\"},\n",
|
||||
" \"Qwen2.5-72B\": {\"path\": \"qwen_2_5_72b\", \"bm\": \"82.3\\\\%\"},\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"PII_ORDER = [\"EMAIL\", \"PHONE\", \"USERNAME\", \"PERSON_NAME\"]\n",
|
||||
"\n",
|
||||
"# =========================\n",
|
||||
"# CANONICALIZATION\n",
|
||||
"# =========================\n",
|
||||
"\n",
|
||||
def canonicalize(val: str, pii_type: str) -> str:
    """Normalize a raw PII string into a canonical comparison form.

    EMAIL values are lowercased; PHONE values are reduced to their digits
    (preserving a leading '+'); USERNAME/PERSON_NAME values are lowercased,
    stripped of honorific titles and punctuation, and whitespace-collapsed.
    Any other type passes through with surrounding whitespace removed.
    """
    cleaned = val.strip()

    if pii_type == "EMAIL":
        return cleaned.lower()

    if pii_type == "PHONE":
        digits = re.sub(r"\D", "", cleaned)
        # Keep the international prefix when the raw value carried one.
        return "+" + digits if cleaned.startswith("+") else digits

    if pii_type not in {"USERNAME", "PERSON_NAME"}:
        return cleaned

    lowered = cleaned.lower()
    no_titles = re.sub(r"\b(mr|ms|mrs|dr|prof)\.?\b", "", lowered)
    no_punct = re.sub(r"[^\w\s]", "", no_titles)
    return re.sub(r"\s+", " ", no_punct).strip()
|
||||
"\n",
|
||||
"\n",
|
||||
"# =========================\n",
|
||||
"# LOAD CORPUS\n",
|
||||
"# =========================\n",
|
||||
"\n",
|
||||
def load_corpus(path: Path) -> Dict[str, Set[str]]:
    """Load a corpus-level JSONL file into canonicalized PII value sets.

    Each record is expected to carry a "PII_type" plus a "PII_unique"
    (preferred) or "PII_all" list of raw string values. Values are
    canonicalized via canonicalize() and deduplicated per PII type.

    Args:
        path: Path to a corpus_level.jsonl file.

    Returns:
        Dict mapping each type in PII_ORDER to a set of canonical values.
        A missing file yields empty sets rather than raising.
    """
    data: Dict[str, Set[str]] = {t: set() for t in PII_ORDER}
    if not path.exists():
        return data

    with path.open("r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            # Fix: skip blank lines so stray padding or a double trailing
            # newline in the JSONL file no longer crashes json.loads
            # (matches the blank-line handling of the other JSONL loaders
            # in this analysis).
            if not line:
                continue

            rec = json.loads(line)
            pii_type = (rec.get("PII_type") or "").upper()
            if pii_type not in PII_ORDER:
                continue

            vals = rec.get("PII_unique") or rec.get("PII_all") or []
            data[pii_type].update(
                canonicalize(v, pii_type)
                for v in vals
                if isinstance(v, str) and v.strip()
            )

    return data
|
||||
"\n",
|
||||
"# =========================\n",
|
||||
"# LOAD GT\n",
|
||||
"# =========================\n",
|
||||
"\n",
|
||||
"GT = load_corpus(GT_PATH)\n",
|
||||
"\n",
|
||||
"# =========================\n",
|
||||
"# COMPUTE TABLE\n",
|
||||
"# =========================\n",
|
||||
"\n",
|
||||
"rows = []\n",
|
||||
"\n",
|
||||
"for name, info in METHODS.items():\n",
|
||||
"\n",
|
||||
" corpus_path = BASE_DIR / info[\"path\"] / \"corpus_level\" / \"corpus_level.jsonl\"\n",
|
||||
" SYS = load_corpus(corpus_path)\n",
|
||||
"\n",
|
||||
" total_sys = 0\n",
|
||||
" total_gt = 0\n",
|
||||
" total_overlap = 0\n",
|
||||
" counts = {}\n",
|
||||
"\n",
|
||||
" for t in PII_ORDER:\n",
|
||||
"\n",
|
||||
" gt_set = GT[t]\n",
|
||||
" sys_set = SYS[t]\n",
|
||||
"\n",
|
||||
" counts[t] = len(sys_set)\n",
|
||||
"\n",
|
||||
" overlap = gt_set.intersection(sys_set)\n",
|
||||
"\n",
|
||||
" total_sys += len(sys_set)\n",
|
||||
" total_gt += len(gt_set)\n",
|
||||
" total_overlap += len(overlap)\n",
|
||||
"\n",
|
||||
" precision = (total_overlap / total_sys) if total_sys else 0.0\n",
|
||||
" recall = (total_overlap / total_gt) if total_gt else 0.0\n",
|
||||
"\n",
|
||||
" rows.append((\n",
|
||||
" name,\n",
|
||||
" info[\"bm\"],\n",
|
||||
" counts[\"EMAIL\"],\n",
|
||||
" counts[\"PHONE\"],\n",
|
||||
" counts[\"USERNAME\"],\n",
|
||||
" counts[\"PERSON_NAME\"],\n",
|
||||
" f\"{precision*100:.2f}\\\\%\",\n",
|
||||
" f\"{recall*100:.2f}\\\\%\",\n",
|
||||
" ))\n",
|
||||
"\n",
|
||||
"# =========================\n",
|
||||
"# EMIT LATEX\n",
|
||||
"# =========================\n",
|
||||
"\n",
|
||||
"print(r\"\\begin{table*}\")\n",
|
||||
"print(r\"\\centering\")\n",
|
||||
"print(r\"\\small\")\n",
|
||||
"print(r\"\\caption{Distribution of distinct PII entities discovered across evaluated methods and PII categories.}\")\n",
|
||||
"print(r\"\\label{tab:model_yield}\")\n",
|
||||
"print(r\"\\begin{tabular}{|l|l|l|l|l|l|l|l|}\")\n",
|
||||
"print(r\"\\hline\")\n",
|
||||
"print(r\"\\multicolumn{2}{|c|}{\\textbf{Method/LLM}} & \\textbf{Email} & \\textbf{Phone} & \\textbf{User Name} & \\textbf{Real Name} & \\textbf{Precision} & \\textbf{Recall} \\\\\")\n",
|
||||
"print(r\"\\hline\")\n",
|
||||
"\n",
|
||||
"for r in rows:\n",
|
||||
" print(\" & \".join(map(str, r)) + r\" \\\\\")\n",
|
||||
" print(r\"\\hline\")\n",
|
||||
"\n",
|
||||
"print(r\"\\end{tabular}\")\n",
|
||||
"print(r\"\\end{table*}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "07108960",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.14.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
253
RQs/RQ4/RQ4_t12.ipynb
Normal file
253
RQs/RQ4/RQ4_t12.ipynb
Normal file
@@ -0,0 +1,253 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "03fbbc9f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CSV path: C:\\Users\\cyfij\\OneDrive\\Desktop\\DFRWS 2026\\Agent\\RQs\\RQ2\\app_total_columns.csv\n",
|
||||
"Exists: True\n",
|
||||
"Using BASE_DIR: C:\\Users\\cyfij\\OneDrive\\Desktop\\DFRWS 2026\\Agent\\RQs\n",
|
||||
"Using CSV: C:\\Users\\cyfij\\OneDrive\\Desktop\\DFRWS 2026\\Agent\\RQs\\RQ2\\app_total_columns.csv\n",
|
||||
"CSV exists: True\n",
|
||||
"\\begin{tabular}{|l|p{1.4cm}|p{1.8cm}|}\n",
|
||||
"\\hline\n",
|
||||
"\\textbf{Method/LLM} &\n",
|
||||
"\\textbf{Avg. Cols Examined} &\n",
|
||||
"\\textbf{Avg. Search Space Reduc.} \\\\\n",
|
||||
"\\hline\n",
|
||||
"bulk\\_extractor-v1.6 (baseline) & NA & 0.0\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-4o-mini & 9.1 & 85.0\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Gemini-2.5-Pro & 0 & 100.0\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Qwen2.5-72B & 31.5 & 41.2\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"LLaMA-3.1-70B-Instruct & 1 & 97.5\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"LLaMA-3.1-8B-Instruct & 0.4 & 99.9\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Mixtral-8x22B & 3.2 & 90.3\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Mixtral-8x7B & 2.6 & 97.2\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"Mistral-Large & 24.9 & 75.6\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-5.1 & 29.9 & 81.0\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-4.1 & 20.5 & 69.7\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"GPT-3.5-turbo & 2.8 & 98.6\\% \\\\\n",
|
||||
"\\hline\n",
|
||||
"\\end{tabular}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import csv\n",
|
||||
"from pathlib import Path\n",
|
||||
"from statistics import mean\n",
|
||||
"\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"# Auto-detect project root (Agent directory)\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"BASE_DIR = Path(r\"C:\\Users\\cyfij\\OneDrive\\Desktop\\DFRWS 2026\\Agent\\RQs\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"CANDIDATE_CSV = BASE_DIR / \"RQ2\" / \"app_total_columns.csv\"\n",
|
||||
"\n",
|
||||
"print(\"CSV path:\", CANDIDATE_CSV)\n",
|
||||
"print(\"Exists:\", CANDIDATE_CSV.exists())\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(\"Using BASE_DIR:\", BASE_DIR)\n",
|
||||
"print(\"Using CSV:\", CANDIDATE_CSV)\n",
|
||||
"print(\"CSV exists:\", CANDIDATE_CSV.exists())\n",
|
||||
"\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"# MODELS\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"MODELS = {\n",
|
||||
" \"gpt_4o_mini\": \"GPT-4o-mini\",\n",
|
||||
" \"gemini_2_5_pro\": \"Gemini-2.5-Pro\",\n",
|
||||
" \"qwen_2_5_72b\": \"Qwen2.5-72B\",\n",
|
||||
" \"llama_3_1_70b\": \"LLaMA-3.1-70B-Instruct\",\n",
|
||||
" \"llama_3_1_8b\": \"LLaMA-3.1-8B-Instruct\",\n",
|
||||
" \"mixtral_8x22b\": \"Mixtral-8x22B\",\n",
|
||||
" \"mixtral_8x7b\": \"Mixtral-8x7B\",\n",
|
||||
" \"mistral_large\": \"Mistral-Large\",\n",
|
||||
" \"gpt_5_1\": \"GPT-5.1\",\n",
|
||||
" \"gpt_4_1\": \"GPT-4.1\",\n",
|
||||
" \"gpt_3_5_turbo\": \"GPT-3.5-turbo\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"# Load candidate totals per app\n",
|
||||
"# CSV must contain columns:\n",
|
||||
"# app_code,total_columns\n",
|
||||
"# -------------------------------------------------\n",
|
||||
def load_candidate_totals(csv_path):
    """Read per-app candidate column totals from a CSV file.

    The CSV must contain "app_code" and "total_columns" columns.
    Rows with a missing app code, a missing total, or a non-numeric
    total (e.g. "NA") are skipped instead of aborting the whole load,
    which the previous unguarded int() conversion did.

    Args:
        csv_path: Path (str or Path) to app_total_columns.csv.

    Returns:
        Dict mapping app_code -> total_columns (int).
    """
    totals = {}
    with open(csv_path, newline="", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            app = (row.get("app_code") or "").strip()
            total = (row.get("total_columns") or "").strip()
            if not app or not total:
                continue
            try:
                totals[app] = int(total)
            except ValueError:
                # Ignore malformed counts rather than crashing mid-file.
                continue
    return totals
|
||||
"\n",
|
||||
"\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"# Load scanned columns per app\n",
|
||||
"# From normalized_results/<model>/app_level/app_level.jsonl\n",
|
||||
"# -------------------------------------------------\n",
|
||||
def load_scanned_cols(app_level_jsonl: Path):
    """Read per-app scanned-column counts from an app_level.jsonl file.

    Expected record shape (one JSON object per line):
        {
          "db_path": "selectedDBs\\A1",
          ...
          "Num_of_source_columns_unique": 29
        }

    The app code is taken as the final path component of "db_path".
    "Num_of_source_columns_unique" is preferred; "Num_of_source_columns"
    is the fallback. When an app appears in several records, the maximum
    count seen is kept.

    Args:
        app_level_jsonl: Path to app_level.jsonl.

    Returns:
        Dict mapping app code (e.g. "A1") -> scanned column count (int).
    """
    scanned = {}

    with app_level_jsonl.open("r", encoding="utf-8") as f:
        for raw in f:
            raw = raw.strip()
            if not raw:
                continue

            rec = json.loads(raw)

            # ---- Extract the app code from db_path ----
            db_path = rec.get("db_path", "")
            if not db_path:
                continue
            app = Path(db_path).name  # e.g. "A1"

            # ---- Prefer the de-duplicated count for efficiency stats ----
            n = rec.get("Num_of_source_columns_unique")
            if n is None:
                n = rec.get("Num_of_source_columns", 0)

            try:
                n = int(n)
            except (TypeError, ValueError):
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; only conversion failures
                # should fall back to 0.
                n = 0

            # Keep the maximum count seen per app.
            scanned[app] = max(scanned.get(app, 0), n)

    return scanned
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"# Main computation\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"candidate_totals = load_candidate_totals(CANDIDATE_CSV)\n",
|
||||
"\n",
|
||||
"results = []\n",
|
||||
"\n",
|
||||
"for slug, display_name in MODELS.items():\n",
|
||||
"\n",
|
||||
" app_jsonl = (\n",
|
||||
" BASE_DIR\n",
|
||||
" / \"normalized_results\"\n",
|
||||
" / slug\n",
|
||||
" / \"app_level\"\n",
|
||||
" / \"app_level.jsonl\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" if not app_jsonl.exists():\n",
|
||||
" results.append((display_name, \"xx\", \"xx\"))\n",
|
||||
" continue\n",
|
||||
"\n",
|
||||
" scanned_cols = load_scanned_cols(app_jsonl)\n",
|
||||
"\n",
|
||||
" examined_vals = []\n",
|
||||
" reduction_vals = []\n",
|
||||
"\n",
|
||||
" for app, total_cols in candidate_totals.items():\n",
|
||||
"\n",
|
||||
" scanned = scanned_cols.get(app, 0)\n",
|
||||
" examined_vals.append(scanned)\n",
|
||||
"\n",
|
||||
" if total_cols > 0:\n",
|
||||
" reduction = 1 - (scanned / total_cols)\n",
|
||||
" reduction_vals.append(reduction)\n",
|
||||
"\n",
|
||||
" if examined_vals and reduction_vals:\n",
|
||||
" avg_examined = round(mean(examined_vals), 1)\n",
|
||||
" avg_reduction = round(mean(reduction_vals) * 100, 1)\n",
|
||||
" results.append((display_name, avg_examined, f\"{avg_reduction}\\\\%\"))\n",
|
||||
" else:\n",
|
||||
" results.append((display_name, \"xx\", \"xx\"))\n",
|
||||
"\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"# Print LaTeX table\n",
|
||||
"# -------------------------------------------------\n",
|
||||
"print(r\"\\begin{tabular}{|l|p{1.4cm}|p{1.8cm}|}\")\n",
|
||||
"print(r\"\\hline\")\n",
|
||||
"print(r\"\\textbf{Method/LLM} &\")\n",
|
||||
"print(r\"\\textbf{Avg. Cols Examined} &\")\n",
|
||||
"print(r\"\\textbf{Avg. Search Space Reduc.} \\\\\")\n",
|
||||
"print(r\"\\hline\")\n",
|
||||
"print(r\"bulk\\_extractor-v1.6 (baseline) & NA & 0.0\\% \\\\\")\n",
|
||||
"print(r\"\\hline\")\n",
|
||||
"\n",
|
||||
"for name, cols, reduc in results:\n",
|
||||
" print(f\"{name} & {cols} & {reduc} \\\\\\\\\")\n",
|
||||
" print(r\"\\hline\")\n",
|
||||
"\n",
|
||||
"print(r\"\\end{tabular}\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d17221d0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.14.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
Reference in New Issue
Block a user