From cbad993a0a958ecf2c643efc8c2470fea30df19f Mon Sep 17 00:00:00 2001
From: dogeystamp
Date: Mon, 30 Dec 2024 22:53:59 -0500
Subject: [PATCH] feat: torch data loader

---
 nnue/.gitignore                               |  1 +
 nnue/README.md                                | 14 ++--
 ...batch_pgn_data.py => s1_batch_pgn_data.py} |  2 -
 ...ess_pgn_data.py => s2_process_pgn_data.py} |  0
 nnue/s3_train_neural_net.py                   | 75 +++++++++++++++++++
 5 files changed, 85 insertions(+), 7 deletions(-)
 rename nnue/{batch_pgn_data.py => s1_batch_pgn_data.py} (97%)
 rename nnue/{process_pgn_data.py => s2_process_pgn_data.py} (100%)
 create mode 100755 nnue/s3_train_neural_net.py

diff --git a/nnue/.gitignore b/nnue/.gitignore
index e88566d..2afa67a 100644
--- a/nnue/.gitignore
+++ b/nnue/.gitignore
@@ -1,3 +1,4 @@
 batches/
 venv/
 train_data/
+__pycache__/
diff --git a/nnue/README.md b/nnue/README.md
index a3acf5b..02f86a7 100644
--- a/nnue/README.md
+++ b/nnue/README.md
@@ -6,15 +6,19 @@
 The network is trained on both self-play games and its games on Lichess.
 Both of these sources provide games in PGN format.
 This folder includes the following scripts:
-- `batch_pgn_data.py`: Combine and convert big PGN files into small chunked files.
-- `process_pgn_data.py`: Convert PGN data into a format suitable for training.
+- `s1_batch_pgn_data.py`: Combine and convert big PGN files into small chunked files.
+- `s2_process_pgn_data.py`: Convert PGN data into a format suitable for training.
 
 Example training pipeline:
 ```bash
 # chunk all the PGN files in `games/`. outputs by default to `batches/batch%d.pgn`.
-./batch_pgn_data.py games/*.pgn
+./s1_batch_pgn_data.py games/*.pgn
 
-# analyze batches 0 to 20 to turn them into training data. outputs by default to train_data/batch%d.tsv.gz.
+# analyze batches to turn them into training data. outputs by default to train_data/batch%d.tsv.gz.
 # set max-workers to the number of hardware threads / cores you have.
-./process_pgn_data.py --engine ../target/release/chess_inator --max-workers 8 batches/batch{0..20}.pgn
+# this is the longest part.
+./s2_process_pgn_data.py --engine ../target/release/chess_inator --max-workers 8 batches/batch*.pgn
+
+# combine all processed data into a single training set file.
+zcat train_data/*.tsv.gz | gzip > combined_training.tsv.gz
 ```
diff --git a/nnue/batch_pgn_data.py b/nnue/s1_batch_pgn_data.py
similarity index 97%
rename from nnue/batch_pgn_data.py
rename to nnue/s1_batch_pgn_data.py
index 20f2314..a19a3be 100755
--- a/nnue/batch_pgn_data.py
+++ b/nnue/s1_batch_pgn_data.py
@@ -18,8 +18,6 @@
 import itertools
 from pathlib import Path
 
-"""Games to include per file in output."""
-
 parser = argparse.ArgumentParser()
 parser.add_argument("files", nargs="+", type=Path)
 parser.add_argument("--batch-size", type=int, help="Number of games to save in each output file. Set this to two to four times the number of concurrent workers used in the processing step.", default=8)
diff --git a/nnue/process_pgn_data.py b/nnue/s2_process_pgn_data.py
similarity index 100%
rename from nnue/process_pgn_data.py
rename to nnue/s2_process_pgn_data.py
diff --git a/nnue/s3_train_neural_net.py b/nnue/s3_train_neural_net.py
new file mode 100755
index 0000000..7dcadf1
--- /dev/null
+++ b/nnue/s3_train_neural_net.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+"""Train the NNUE weights."""
+
+import torch
+import pandas as pd
+import numpy as np
+
+from torch.utils.data import Dataset, DataLoader
+from pathlib import Path
+from dataclasses import dataclass
+
+
+################################
+################################
+## Data loading / parsing
+################################
+################################
+
+
+@dataclass
+class Position:
+    """Single board position."""
+
+    fen: str
+    """Normal board representation."""
+
+    board: torch.Tensor
+    """Multi-hot board representation."""
+
+    cp_eval: np.double
+    """Centipawn evaluation (white perspective)."""
+
+    expected_points: np.double
+    """
+    Points expected to be gained for white from the game, based on centipawn evaluation.
+
+    - 0: black win
+    - 0.5: draw
+    - 1: white win
+    """
+
+
+def sigmoid(x):
+    """Calculate sigmoid of `x`, using scaling constant `K`."""
+    K = 150
+    return 1 / (1 + np.exp(-K * x / 400))
+
+
+class ChessPositionDataset(Dataset):
+    def __init__(self, data_file: Path):
+        self.data = pd.read_csv(data_file, delimiter="\t")
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        row = self.data.iloc[idx]
+
+        centipawns = np.double(row.iloc[2])
+
+        return Position(
+            fen=row.iloc[0],
+            board=torch.as_tensor([1 if c == "1" else 0 for c in row.iloc[1]]),
+            cp_eval=centipawns,
+            expected_points=sigmoid(centipawns / 100),
+        )
+
+if __name__ == "__main__":
+    full_dataset = ChessPositionDataset(Path("combined_training.tsv.gz"))
+
+    train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [0.8, 0.2])
+
+    train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True)
+    test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=True)
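
Note on consuming these loaders: PyTorch's default collate function can batch
tensors, numbers, mappings, and sequences, but not arbitrary dataclass
instances such as `Position`, so iterating the `DataLoader`s above as
constructed will fail with a `TypeError`. Below is a minimal sketch of a
custom `collate_fn`, assuming the training step only needs the board tensor
and the expected-points target; the `collate_positions` helper is
illustrative and not part of this patch:

```python
import torch
from torch.utils.data import DataLoader

def collate_positions(batch):
    """Stack a list of Position objects into batched tensors.

    Illustrative helper (not part of this patch): the default collate
    function cannot batch dataclasses, so the fields are stacked manually.
    """
    # multi-hot board vectors, stacked into a (batch, features) float tensor
    boards = torch.stack([pos.board for pos in batch]).to(torch.float32)
    # expected game points in [0, 1], one target per position
    targets = torch.tensor(
        [pos.expected_points for pos in batch], dtype=torch.float32
    )
    return boards, targets

# usage sketch:
# train_dataloader = DataLoader(
#     train_dataset, batch_size=64, shuffle=True, collate_fn=collate_positions
# )
```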