Local authority dataset validation
import sys
from pathlib import Path

import h5py
import numpy as np
import pandas as pd
from itables import init_notebook_mode
import itables.options as opt

from policyengine_uk import Microsimulation
from policyengine_uk_data.storage import STORAGE_FOLDER
from policyengine_uk_data.utils.huggingface import download

# Render dataframes as interactive tables, capped at 1MB of embedded data each.
opt.maxBytes = "1MB"
init_notebook_mode(all_interactive=True)
REPO = Path(".").resolve().parent
weights_file_path = STORAGE_FOLDER / "local_authority_weights.h5"

# Download the 2021 local authority codes and names lookup.
local_authority_names_file_path = download(
    repo="policyengine/policyengine-uk-data",
    repo_filename="local_authorities_2021.csv",
    local_folder=None,
    version=None,
)
local_authorities_2021 = pd.read_csv(local_authority_names_file_path)
# Load the calibrated 2025 weight matrix (local authorities × households).
with h5py.File(weights_file_path, "r") as f:
    weights = f[str(2025)][...]

# Baseline national household weights for the same year.
baseline = Microsimulation()
household_weights = baseline.calculate("household_weight", 2025).values
from policyengine_uk_data.datasets.frs.local_areas.local_authorities.loss import (
    create_local_authority_target_matrix,
    create_national_target_matrix,
)
from policyengine_uk_data.datasets import EnhancedFRS_2022_23

local_authority_target_matrix, local_authority_actuals, _ = create_local_authority_target_matrix(EnhancedFRS_2022_23, 2025, None)
national_target_matrix, national_actuals = create_national_target_matrix(EnhancedFRS_2022_23, 2025, None)
# Estimated totals for each local authority: the weight matrix (local authorities × households)
# times the target matrix (households × metrics).
local_authority_wide = weights @ local_authority_target_matrix
local_authority_wide.index = local_authorities_2021.code.values
local_authority_wide["name"] = local_authorities_2021.name.values
local_authority_results = pd.melt(
    local_authority_wide.reset_index(), id_vars=["index", "name"], var_name="variable", value_name="value"
)

local_authority_actuals.index = local_authorities_2021.code.values
local_authority_actuals["name"] = local_authorities_2021.name.values
local_authority_actuals_long = pd.melt(
    local_authority_actuals.reset_index(), id_vars=["index", "name"], var_name="variable", value_name="value"
)

# Join estimates to targets and compute absolute and relative errors.
local_authority_target_validation = pd.merge(
    local_authority_results, local_authority_actuals_long, on=["index", "variable"], suffixes=("_target", "_actual")
)
local_authority_target_validation.drop("name_actual", axis=1, inplace=True)
local_authority_target_validation.columns = ["index", "name", "metric", "estimate", "target"]
local_authority_target_validation["error"] = local_authority_target_validation["estimate"] - local_authority_target_validation["target"]
local_authority_target_validation["abs_error"] = local_authority_target_validation["error"].abs()
local_authority_target_validation["rel_abs_error"] = local_authority_target_validation["abs_error"] / local_authority_target_validation["target"]
Calibration check
Sorting the validation results by relative absolute error shows how well the calibrated weights reproduce the target statistics across UK local authorities under 2021 boundaries. The table below runs from the closest matches to the largest discrepancies; a lower relative absolute error indicates a better calibration fit. A short summary of these errors by metric follows the table.
local_authority_target_validation.sort_values("rel_abs_error")
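As a rough way to condense the table above, the snippet below (an illustrative sketch, not part of the original notebook) summarises the distribution of relative absolute error across local authorities for each metric, using the local_authority_target_validation frame built earlier.
# Illustrative summary: distribution of relative absolute error across
# local authorities, grouped by calibration metric.
local_authority_error_summary = (
    local_authority_target_validation.groupby("metric")["rel_abs_error"]
    .describe(percentiles=[0.5, 0.9, 0.99])
    .sort_values("mean", ascending=False)
)
local_authority_error_summary
Metrics at the top of this summary are the ones where, on average, the calibrated local authority estimates deviate most from the published targets.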
# National-level estimates: baseline household weights times the national target matrix.
national_performance = household_weights @ national_target_matrix
national_target_validation = pd.DataFrame(
    {"metric": national_performance.index, "estimate": national_performance.values}
)
national_target_validation["target"] = national_actuals.values
national_target_validation["error"] = national_target_validation["estimate"] - national_target_validation["target"]
national_target_validation["abs_error"] = national_target_validation["error"].abs()
national_target_validation["rel_abs_error"] = national_target_validation["abs_error"] / national_target_validation["target"]
The table below shows the relative absolute error for each calibration target at the national level, sorted from the closest matches to the largest discrepancies, with a brief illustrative summary after it.
national_target_validation.sort_values("rel_abs_error")
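To reduce the national table to a headline figure, one option is the share of targets matched within a given tolerance. The snippet below is a minimal sketch over the national_target_validation frame built above; the 1% and 5% thresholds are illustrative choices rather than thresholds used in calibration.
# Illustrative check: share of national calibration targets reproduced
# within 1% and 5% relative absolute error.
within_1_percent = (national_target_validation["rel_abs_error"] < 0.01).mean()
within_5_percent = (national_target_validation["rel_abs_error"] < 0.05).mean()
print(f"Within 1%: {within_1_percent:.1%} of targets; within 5%: {within_5_percent:.1%}")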