aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md16
-rw-r--r--rdb-to-sheets.py165
-rw-r--r--requirements.txt8
-rw-r--r--setup.sh5
4 files changed, 194 insertions, 0 deletions
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3e14c6a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,16 @@
+# rdb-to-sheets
+A very simple python script to convert the TXT output from RDB to an Excel Sheet
+
+## How to use
+1. On Meridian 1 do : LD 21; REQ PRT; TYPE RDB;
+1. Copy the output from the first DN up to the last DN listed (see example below)
+1. Save to somewhere as a raw txt
+1. Use Bash script to set up Python3 VENV, or install requirements.txt things with pip on your system however you please.
+1. Run : python3 ./rdb-to-sheets.py /your/rdb/file.txt
+1. Enjoy your converted RDB Excel sheet (created in the same directory as rdb-to-sheets.py by default; you can also choose any output location like this: python3 ./rdb-to-sheets.py /your/rdb/file.txt /your/output/rdb/sheet.xlsx)
+
+## Consideration
+1. I know little coding and even less Python; I've only tested this on my Meridian 1 Option 11e, as that's the only system I own.
+1. If you have suggestions on how to improve this, feel free to submit them here.
+1. If I made something completely wrong in the script, it's not my responsibility.
+1. Have fun !
diff --git a/rdb-to-sheets.py b/rdb-to-sheets.py
new file mode 100644
index 0000000..b715284
--- /dev/null
+++ b/rdb-to-sheets.py
@@ -0,0 +1,165 @@
+import sys
+import pandas as pd
+import re
+
# --- Command-line handling -------------------------------------------------
# Keep the usage help friendly for non-programmers.
if len(sys.argv) < 2:
    print("Usage: python rdb-to-sheets.py <input_file> [output_file]")
    print("Example: python rdb-to-sheets.py RDBv2.txt RDB.xlsx")
    print("If no output file is defined RDB.xlsx will be used per default make sure such doesn't exist already")
    sys.exit(1)  # Nothing to do without an input file.

# Input file is required; output falls back to RDB.xlsx in the current dir.
input_file = sys.argv[1]
output_path = sys.argv[2] if len(sys.argv) > 2 else "RDB.xlsx"

# Read the whole RDB printout up front. Explicit encoding plus
# errors="replace" so stray bytes from a serial capture don't abort the
# run with a UnicodeDecodeError; a missing file gets a friendly message
# instead of a traceback.
try:
    with open(input_file, "r", encoding="utf-8", errors="replace") as file:
        lines = file.readlines()
except OSError as err:
    print(f"Could not read {input_file}: {err}")
    sys.exit(1)
+
+# Look at the data
def parse_rdb_data(lines):
    """Parse raw text lines of a Meridian 1 ``LD 21 / REQ PRT / TYPE RDB``
    printout into a list of dicts.

    Each dict holds the scalar fields found for one DN block ("DN", "NAME",
    "TYPE", "ROUT", "STCD", "FEAT") plus, when TN lines were seen, a "TNs"
    key holding a list of per-TN dicts ("TN_1_TN", "KEY", "DES", "DATE",
    "MARP").

    Args:
        lines: iterable of raw text lines (trailing newlines allowed).

    Returns:
        list[dict]: one entry per DN block, in file order.
    """
    # Compile the TN-line patterns once, outside the loop.
    tn_re = re.compile(r"(\d{3} \d \d{2} \d{2})")    # loop/shelf/card/unit
    key_re = re.compile(r"KEY (\d{2})")
    des_re = re.compile(r"DES\s+(\S+)")
    date_re = re.compile(r"(\d{1,2} \w{3} \d{4})")   # e.g. "12 JAN 2024"

    entries = []
    current_entry = {}
    current_tns = []

    def _flush():
        """Store the in-progress entry (with any collected TNs) on entries."""
        if current_entry:
            if current_tns:
                current_entry["TNs"] = list(current_tns)
            entries.append(current_entry)

    for raw in lines:
        line = raw.strip()
        if not line:
            continue

        parts = line.split()

        # A "DN <number>" line opens a new entry; flush the previous one
        # first. Token-match (not startswith) so other prompts that merely
        # begin with "DN" are not mistaken for a new block, and guard the
        # length so a bare "DN" line cannot raise IndexError.
        if parts[0] == "DN" and len(parts) > 1:
            _flush()
            current_tns = []  # stray TNs seen before a DN never leak in
            current_entry = {"DN": parts[1]}

        elif "CPND" in line:
            # CPND announces a name block; the actual value arrives on a
            # later NAME line, so park a placeholder.
            current_entry["NAME"] = None

        elif "NAME" in line and current_entry.get("NAME") is None:
            current_entry["NAME"] = line.replace("NAME", "").strip()

        # Length guards below: the printout can emit a keyword with no
        # value, which used to crash on split()[1].
        elif line.startswith("TYPE") and len(parts) > 1:
            current_entry["TYPE"] = parts[1]

        elif line.startswith("ROUT") and len(parts) > 1:
            current_entry["ROUT"] = parts[1]

        elif line.startswith("STCD") and len(parts) > 1:
            current_entry["STCD"] = parts[1]

        elif line.startswith("FEAT"):
            current_entry["FEAT"] = " ".join(parts[1:])

        elif line.startswith("TN"):
            tn_match = tn_re.search(line)
            if tn_match:
                tn_entry = {"TN_1_TN": tn_match.group(1)}

                key_match = key_re.search(line)
                if key_match:
                    tn_entry["KEY"] = key_match.group(1)

                des_match = des_re.search(line)
                if des_match:
                    tn_entry["DES"] = des_match.group(1)

                date_match = date_re.search(line)
                if date_match:
                    tn_entry["DATE"] = date_match.group(0)

                # MARP marks the prime appearance of the DN on this TN.
                tn_entry["MARP"] = "YES" if "MARP" in line else "NO"
                current_tns.append(tn_entry)

    # Don't drop the last entry in the file.
    _flush()

    return entries
+
# Parse the raw RDB printout into one dict per DN block.
parsed_entries = parse_rdb_data(lines)
+
+# Categorize the parsed entries into per-sheet buckets
def categorize_entries(entries):
    """Sort parsed RDB entries into the buckets that become Excel sheets.

    Entries carrying a "TNs" list are flattened into one row per TN
    (entry fields merged with that TN's fields).

    Returns a 9-tuple:
        (main, multiple_tns, routes, cdp, feature_codes, att_ldn,
         other, unexpected, unexpected_sheet_name)
    """

    def _expand(entry, bucket):
        """Append one merged row per TN of *entry* onto *bucket*."""
        for tn in entry["TNs"]:
            row = dict(entry)
            row.update(tn)
            bucket.append(row)

    main_entries = []
    multiple_tns_entries = []
    route_entries = []
    cdp_entries = []
    feature_code_entries = []
    att_ldn_entries = []
    other_entries = []
    unexpected_data = []

    for entry in entries:
        kind = entry.get("TYPE")

        if kind in ("SL1", "500"):
            if "TNs" in entry:
                # Sets with more than one TN appearance go to their own sheet.
                target = (multiple_tns_entries
                          if len(entry["TNs"]) > 1 else main_entries)
                _expand(entry, target)
            else:
                main_entries.append(entry)

        elif kind in ("ATT", "LDN"):
            if "TNs" in entry:
                _expand(entry, att_ldn_entries)
            else:
                att_ldn_entries.append(entry)

        elif kind == "RDB":
            route_entries.append(entry)

        elif kind == "CDP":
            # CDP rows only need DN/TYPE/STCD; missing STCD defaults to DSC.
            cdp_entries.append({
                "DN": entry.get("DN", ""),
                "TYPE": entry.get("TYPE", ""),
                "STCD": entry.get("STCD", "DSC"),
            })

        elif kind == "FFC":
            feature_code_entries.append(entry)

        elif kind is None:
            # No TYPE at all — surface it instead of silently dropping it.
            unexpected_data.append(entry)

        else:
            other_entries.append(entry)

    unexpected_sheet_name = ("Unexpected Data" if unexpected_data
                             else "No Unexpected Data")

    return (main_entries, multiple_tns_entries, route_entries, cdp_entries,
            feature_code_entries, att_ldn_entries, other_entries,
            unexpected_data, unexpected_sheet_name)
+
+# Categorize entries further
# Unpack the per-sheet buckets produced above.
(main_entries, multiple_tns_entries, route_entries, cdp_entries,
 feature_code_entries, att_ldn_entries, other_entries, unexpected_data,
 unexpected_sheet_name) = categorize_entries(parsed_entries)


# Map each worksheet name to its rows. The last sheet's name reflects
# whether any entries without a TYPE were found.
sheets = {
    "Main": main_entries,
    "Multiple TNs": multiple_tns_entries,
    "Routes": route_entries,
    "CDP": cdp_entries,
    "Feature Codes": feature_code_entries,
    "ATT_LDN": att_ldn_entries,
    "Other": other_entries,
    unexpected_sheet_name: unexpected_data,
}

# Write one worksheet per bucket. The intermediate "TNs" list column is
# dropped — its contents were already flattened into per-TN rows.
with pd.ExcelWriter(output_path) as writer:
    for sheet_name, rows in sheets.items():
        frame = pd.DataFrame(rows)
        if "TNs" in frame.columns:
            frame = frame.drop(columns=["TNs"])
        frame.to_excel(writer, sheet_name=sheet_name, index=False)

print("Processing complete. Output saved to", output_path)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..358d115
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+et_xmlfile==2.0.0
+numpy==2.2.2
+openpyxl==3.1.5
+pandas==2.2.3
+python-dateutil==2.9.0.post0
+pytz==2025.1
+six==1.17.0
+tzdata==2025.1
diff --git a/setup.sh b/setup.sh
new file mode 100644
index 0000000..26fc88f
--- /dev/null
+++ b/setup.sh
@@ -0,0 +1,5 @@
#!/bin/bash
# Create a local Python virtual environment and install the pinned
# dependencies from requirements.txt.
#
# NOTE: run this as "source ./setup.sh" if you want the venv to remain
# active in your current shell; executing it normally only activates the
# venv inside the script's own subshell.
set -e  # stop at the first failing command instead of reporting success
python3 -m venv .venv
source .venv/bin/activate
pip3 install -r requirements.txt
echo "venv created and requirements installed. Activate it in your shell with: source .venv/bin/activate"