{"id":278,"date":"2024-03-12T00:18:37","date_gmt":"2024-03-11T16:18:37","guid":{"rendered":"http:\/\/tobykskgd.life\/?p=278"},"modified":"2024-11-14T22:15:54","modified_gmt":"2024-11-14T14:15:54","slug":"11","status":"publish","type":"post","link":"https:\/\/tobykskgd.life\/index.php\/11\/","title":{"rendered":"\u674e\u5b8f\u6bc5\u673a\u5668\u5b66\u4e60\u8bfe\u7a0b\u7b14\u8bb0EP7"},"content":{"rendered":"\n<p>\u3010HW2\u3011slides0.1\u674e\u5b8f\u6bc52021\/2022\u6625\u673a\u5668\u5b66\u4e60\u8bfe\u7a0b\u7b14\u8bb0EP7(P28-P30)<\/p>\n\n\n\n<figure class=\"wp-block-image size-full\"><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/tobykskgd.life\/wp-content\/uploads\/2024\/02\/\u5c4f\u5e55\u622a\u56fe-2024-02-05-213355.png'><img class=\"lazyload lazyload-style-1\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  loading=\"lazy\" decoding=\"async\" width=\"432\" height=\"218\" data-original=\"https:\/\/tobykskgd.life\/wp-content\/uploads\/2024\/02\/\u5c4f\u5e55\u622a\u56fe-2024-02-05-213355.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" class=\"wp-image-37\"  sizes=\"auto, (max-width: 432px) 100vw, 432px\" \/><\/div><\/figure>\n\n\n\n<figure class=\"wp-block-image size-full\"><div class='fancybox-wrapper lazyload-container-unload' data-fancybox='post-images' href='https:\/\/tobykskgd.life\/wp-content\/uploads\/2024\/03\/\u5c4f\u5e55\u622a\u56fe-2024-03-12-000823.png'><img class=\"lazyload lazyload-style-1\" src=\"data:image\/svg+xml;base64,PCEtLUFyZ29uTG9hZGluZy0tPgo8c3ZnIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgc3Ryb2tlPSIjZmZmZmZmMDAiPjxnPjwvZz4KPC9zdmc+\"  loading=\"lazy\" 
decoding=\"async\" width=\"957\" height=\"167\" data-original=\"https:\/\/tobykskgd.life\/wp-content\/uploads\/2024\/03\/\u5c4f\u5e55\u622a\u56fe-2024-03-12-000823.png\" src=\"data:image\/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXYzh8+PB\/AAffA0nNPuCLAAAAAElFTkSuQmCC\" alt=\"\" class=\"wp-image-279\"  sizes=\"auto, (max-width: 957px) 100vw, 957px\" \/><\/div><\/figure>\n\n\n\n<p>\u4ece\u4eca\u5929\u5f00\u59cb\u6211\u5c06\u5b66\u4e60\u674e\u5b8f\u6bc5\u6559\u6388\u7684\u673a\u5668\u5b66\u4e60\u89c6\u9891\uff0c\u4e0b\u9762\u662f\u8bfe\u7a0b\u7684\u8fde\u63a5<a href=\"https:\/\/www.bilibili.com\/video\/BV1Wv411h7kN\/?spm_id_from=333.337.search-card.all.click&amp;vd_source=fa9de75b9e5251495ee15fc767cb5892\">(\u5f3a\u63a8)\u674e\u5b8f\u6bc52021\/2022\u6625\u673a\u5668\u5b66\u4e60\u8bfe\u7a0b_\u54d4\u54e9\u54d4\u54e9_bilibili<\/a>\u3002\u4e00\u5171\u6709155\u4e2a\u89c6\u9891\uff0c\u4e89\u53d6\u90fd\u5b66\u4e60\u5b8c\u6210\u5427\u3002<\/p>\n\n\n\n<p>\u90a3\u4e48\u9996\u5148\u8fd9\u95e8\u8bfe\u7a0b\u9700\u8981\u6709\u4e00\u5b9a\u7684\u4ee3\u7801\u57fa\u7840\uff0c\u7b80\u5355\u5b66\u4e60\u4e00\u4e0bPython\u7684\u57fa\u672c\u7528\u6cd5\uff0c\u8fd8\u6709\u91cc\u9762\u7684NumPy\u5e93\u7b49\u7b49\u7684\u57fa\u672c\u77e5\u8bc6\u3002\u518d\u5c31\u662f\u6570\u5b66\u65b9\u9762\u7684\u57fa\u7840\u5566\uff0c\u5fae\u79ef\u5206\u3001\u7ebf\u6027\u4ee3\u6570\u548c\u6982\u7387\u8bba\u7684\u57fa\u7840\u90fd\u662f\u542c\u61c2\u8fd9\u95e8\u8bfe\u5fc5\u987b\u7684\u3002<\/p>\n\n\n\n<hr class=\"wp-block-separator 
has-alpha-channel-opacity\"\/>\n\n\n\n<p>\u8fd9\u91cc\u662f\u8fd9\u95e8\u8bfe\u7a0b\u7684\u4f5c\u4e1a2\uff0c\u90a3\u56e0\u4e3a\u672c\u4eba\u6bd4\u8f83\u5e9f\uff0c\u57fa\u7840\u8fd8\u6ca1\u6709\u5f88\u597d\uff0c\u8fd9\u91cc\u5148\u653e\u4e00\u4e2a\u57fa\u672c\u6ca1\u6709\u4ec0\u4e48\u6539\u52a8\u7684\u52a9\u6559\u7684\u7a0b\u5e8f\u4f5c\u4e3aHW2\u76840.1\u7248\u672c\uff0c\u8fd9\u91cc\u56e0\u4e3a\u6ca1\u6709\u4ec0\u4e48\u6539\u52a8\uff0c\u8fd9\u4e2amodel\u4e5f\u5f88\u5e9f\u5566\uff0c\u8fd9\u91cc\u7684loss\u6bd4\u8f83\u5927\u3002\u7b49\u4e4b\u540e\u6709\u4e86\u6570\u5b66\u548c\u7a0b\u5e8f\u4e0a\u7684\u57fa\u7840\u4e4b\u540e\uff0c\u4e00\u5b9a\u4f1a\u56de\u6765\u8865\u4e00\u4e2a\u6548\u679c\u597d\u4e00\u70b9\u76841.0\u7248\u672c\uff01<\/p>\n\n\n\n<p>\u8fd9\u91cc\u6211\u5728\u52a9\u6559\u7684\u4ee3\u7801\u57fa\u7840\u4e0a\u6539\u4e86\u8d85\u53c2\u6570\u7684\u6570\u503c\uff0c\u8ba9\u6a21\u578b\u7684loss\u4e0b\u964d\u4e86\u4e00\u70b9\u70b9\uff0c\u4f46\u662f\u4e00\u70b9\u70b9\u7684\u8fdb\u6b65\u7684\u4ee3\u4ef7\u662f\u8bad\u7ec3\u8017\u8d39\u7684\u65f6\u95f4\u6307\u6570\u578b\u7684\u66b4\u589e\u3002\u8fd8\u662f\u7b49\u4e4b\u540e\u5168\u90e8\u5b66\u5b8c\u4e4b\u540e\uff0c\u518d\u6765\u8865\u4e00\u4e2a\u6539\u8fdb\u7248\u672c\u5427\u3002<\/p>\n\n\n\n<hr class=\"wp-block-separator has-alpha-channel-opacity is-style-dots\"\/>\n\n\n\n<p>\u4e0b\u8f7d\u8bad\u7ec3\/\u6d4b\u8bd5\u6570\u636e\u96c6<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>!pip install --upgrade gdown\n\n# Main link\n!gdown --id '1o6Ag-G3qItSmYhTheX6DYiuyNzWyHyTc' --output libriphone.zip\n\n# Backup link 1\n# !gdown --id '1R1uQYi4QpX0tBfUWt2mbZcncdBsJkxeW' --output libriphone.zip\n\n# Backup link 2\n# !wget -O libriphone.zip \"https:\/\/www.dropbox.com\/s\/wqww8c5dbrl2ka9\/libriphone.zip?dl=1\"\n\n!unzip -q libriphone.zip\n!ls libriphone<\/code><\/pre>\n\n\n\n<p>Requirement already satisfied: gdown in \/usr\/local\/lib\/python3.10\/dist-packages (4.7.3) Collecting gdown Downloading gdown-5.1.0-py3-none-any.whl (17 
kB) Requirement already satisfied: beautifulsoup4 in \/usr\/local\/lib\/python3.10\/dist-packages (from gdown) (4.12.3) Requirement already satisfied: filelock in \/usr\/local\/lib\/python3.10\/dist-packages (from gdown) (3.13.1) Requirement already satisfied: requests[socks] in \/usr\/local\/lib\/python3.10\/dist-packages (from gdown) (2.31.0) Requirement already satisfied: tqdm in \/usr\/local\/lib\/python3.10\/dist-packages (from gdown) (4.66.2) Requirement already satisfied: soupsieve&gt;1.2 in \/usr\/local\/lib\/python3.10\/dist-packages (from beautifulsoup4-&gt;gdown) (2.5) Requirement already satisfied: charset-normalizer&lt;4,&gt;=2 in \/usr\/local\/lib\/python3.10\/dist-packages (from requests[socks]-&gt;gdown) (3.3.2) Requirement already satisfied: idna&lt;4,&gt;=2.5 in \/usr\/local\/lib\/python3.10\/dist-packages (from requests[socks]-&gt;gdown) (3.6) Requirement already satisfied: urllib3&lt;3,&gt;=1.21.1 in \/usr\/local\/lib\/python3.10\/dist-packages (from requests[socks]-&gt;gdown) (2.0.7) Requirement already satisfied: certifi&gt;=2017.4.17 in \/usr\/local\/lib\/python3.10\/dist-packages (from requests[socks]-&gt;gdown) (2024.2.2) Requirement already satisfied: PySocks!=1.5.7,&gt;=1.5.6 in \/usr\/local\/lib\/python3.10\/dist-packages (from requests[socks]-&gt;gdown) (1.7.1) Installing collected packages: gdown Attempting uninstall: gdown Found existing installation: gdown 4.7.3 Uninstalling gdown-4.7.3: Successfully uninstalled gdown-4.7.3 Successfully installed gdown-5.1.0 \/usr\/local\/lib\/python3.10\/dist-packages\/gdown\/__main__.py:132: FutureWarning: Option `&#8211;id` was deprecated in version 4.3.1 and will be removed in 5.0. You don&#8217;t need to pass it anymore to use a file ID. 
warnings.warn( Downloading&#8230; From (original): <a rel=\"noreferrer noopener\" target=\"_blank\" href=\"https:\/\/drive.google.com\/uc?id=1o6Ag-G3qItSmYhTheX6DYiuyNzWyHyTc\">https:\/\/drive.google.com\/uc?id=1o6Ag-G3qItSmYhTheX6DYiuyNzWyHyTc<\/a> From (redirected): <a rel=\"noreferrer noopener\" target=\"_blank\" href=\"https:\/\/drive.usercontent.google.com\/download?id=1o6Ag-G3qItSmYhTheX6DYiuyNzWyHyTc&amp;confirm=t&amp;uuid=5f6f60a2-b8f1-417e-8539-c50d69f106d6\">https:\/\/drive.usercontent.google.com\/download?id=1o6Ag-G3qItSmYhTheX6DYiuyNzWyHyTc&amp;confirm=t&amp;uuid=5f6f60a2-b8f1-417e-8539-c50d69f106d6<\/a> To: \/content\/libriphone.zip 100% 479M\/479M [00:09&lt;00:00, 51.8MB\/s] feat test_split.txt train_labels.txt train_split.txt<\/p>\n\n\n\n<p>\u6570\u636e\u51c6\u5907<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>import os\nimport random\nimport pandas as pd\nimport torch\nfrom tqdm import tqdm\n\ndef load_feat(path):\n    feat = torch.load(path)\n    return feat\n\ndef shift(x, n):\n    if n &lt; 0:\n        left = x&#91;0].repeat(-n, 1)\n        right = x&#91;:n]\n\n    elif n &gt; 0:\n        right = x&#91;-1].repeat(n, 1)\n        left = x&#91;n:]\n    else:\n        return x\n\n    return torch.cat((left, right), dim=0)\n\ndef concat_feat(x, concat_n):\n    assert concat_n % 2 == 1 # n must be odd\n    if concat_n &lt; 2:\n        return x\n    seq_len, feature_dim = x.size(0), x.size(1)\n    x = x.repeat(1, concat_n) \n    x = x.view(seq_len, concat_n, feature_dim).permute(1, 0, 2) # concat_n, seq_len, feature_dim\n    mid = (concat_n \/\/ 2)\n    for r_idx in range(1, mid+1):\n        x&#91;mid + r_idx, :] = shift(x&#91;mid + r_idx], r_idx)\n        x&#91;mid - r_idx, :] = shift(x&#91;mid - r_idx], -r_idx)\n\n    return x.permute(1, 0, 2).view(seq_len, concat_n * feature_dim)\n\ndef preprocess_data(split, feat_dir, phone_path, concat_nframes, train_ratio=0.8, train_val_seed=1337):\n    class_num = 41 # NOTE: pre-computed, should not need 
change\n    mode = 'train' if (split == 'train' or split == 'val') else 'test'\n\n    label_dict = {}\n    if mode != 'test':\n      phone_file = open(os.path.join(phone_path, f'{mode}_labels.txt')).readlines()\n\n      for line in phone_file:\n          line = line.strip('\\n').split(' ')\n          label_dict&#91;line&#91;0]] = &#91;int(p) for p in line&#91;1:]]\n\n    if split == 'train' or split == 'val':\n        # split training and validation data\n        usage_list = open(os.path.join(phone_path, 'train_split.txt')).readlines()\n        random.seed(train_val_seed)\n        random.shuffle(usage_list)\n        percent = int(len(usage_list) * train_ratio)\n        usage_list = usage_list&#91;:percent] if split == 'train' else usage_list&#91;percent:]\n    elif split == 'test':\n        usage_list = open(os.path.join(phone_path, 'test_split.txt')).readlines()\n    else:\n        raise ValueError('Invalid \\'split\\' argument for dataset: PhoneDataset!')\n\n    usage_list = &#91;line.strip('\\n') for line in usage_list]\n    print('&#91;Dataset] - # phone classes: ' + str(class_num) + ', number of utterances for ' + split + ': ' + str(len(usage_list)))\n\n    max_len = 3000000\n    X = torch.empty(max_len, 39 * concat_nframes)\n    if mode != 'test':\n      y = torch.empty(max_len, dtype=torch.long)\n\n    idx = 0\n    for i, fname in tqdm(enumerate(usage_list)):\n        feat = load_feat(os.path.join(feat_dir, mode, f'{fname}.pt'))\n        cur_len = len(feat)\n        feat = concat_feat(feat, concat_nframes)\n        if mode != 'test':\n          label = torch.LongTensor(label_dict&#91;fname])\n\n        X&#91;idx: idx + cur_len, :] = feat\n        if mode != 'test':\n          y&#91;idx: idx + cur_len] = label\n\n        idx += cur_len\n\n    X = X&#91;:idx, :]\n    if mode != 'test':\n      y = y&#91;:idx]\n\n    print(f'&#91;INFO] {split} set')\n    print(X.shape)\n    if mode != 'test':\n      print(y.shape)\n      return X, y\n    else:\n      return 
X<\/code><\/pre>\n\n\n\n<p>\u5b9a\u4e49\u6570\u636e\u96c6<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>import torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nclass LibriDataset(Dataset):\n    def __init__(self, X, y=None):\n        self.data = X\n        if y is not None:\n            self.label = torch.LongTensor(y)\n        else:\n            self.label = None\n\n    def __getitem__(self, idx):\n        if self.label is not None:\n            return self.data&#91;idx], self.label&#91;idx]\n        else:\n            return self.data&#91;idx]\n\n    def __len__(self):\n        return len(self.data)<\/code><\/pre>\n\n\n\n<p>\u5b9a\u4e49\u6a21\u578b<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass BasicBlock(nn.Module):\n    def __init__(self, input_dim, output_dim):\n        super(BasicBlock, self).__init__()\n\n        self.block = nn.Sequential(\n            nn.Linear(input_dim, output_dim),\n            nn.ReLU(),\n        )\n\n    def forward(self, x):\n        x = self.block(x)\n        return x\n\n\nclass Classifier(nn.Module):\n    def __init__(self, input_dim, output_dim=41, hidden_layers=1, hidden_dim=256):\n        super(Classifier, self).__init__()\n\n        self.fc = nn.Sequential(\n            BasicBlock(input_dim, hidden_dim),\n            *&#91;BasicBlock(hidden_dim, hidden_dim) for _ in range(hidden_layers)],\n            nn.Linear(hidden_dim, output_dim)\n        )\n\n    def forward(self, x):\n        x = self.fc(x)\n        return x<\/code><\/pre>\n\n\n\n<p>\u8d85\u53c2\u6570\uff08\u8fd9\u91cc\u6539\u52a8\u4e86\u4e00\u70b9\uff09<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code># \u6570\u636e\u53c2\u6570\nconcat_nframes = 1              # \u8981\u8fde\u63a5\u7684\u5e27\u6570\uff0cn\u5fc5\u987b\u662f\u5947\u6570(\u603b\u51712k+1n\u5e27)\ntrain_ratio = 0.8               # 
\u6570\u636e\u7684\u6bd4\u4f8b\u7528\u4e8e\u8bad\u7ec3\uff0c\u5176\u4f59\u7684\u5c06\u7528\u4e8e\u9a8c\u8bc1\n\n# \u8bad\u7ec3\u53c2\u6570\nseed = 0                        # \u968f\u673a\u79cd\u5b50\nbatch_size = 1024                # batch size\nnum_epoch = 100                   # \u8bad\u7ec3\u5386\u5143\u7684\u6570\u76ee\nlearning_rate = 1e-5         # \u5b66\u4e60\u7387\nmodel_path = '.\/model.ckpt'     # \u4fdd\u5b58\u68c0\u67e5\u70b9\u7684\u8def\u5f84\n\n# \u6a21\u578b\u53c2\u6570\ninput_dim = 39 * concat_nframes # \u6a21\u578b\u7684\u8f93\u5165\u503c\uff0c\u4e0d\u5e94\u66f4\u6539\u5176\u503c\nhidden_layers = 1               # \u9690\u85cf\u5c42\u7684\u6570\u91cf\nhidden_dim = 256                # \u9690\u85cf\u5c42\u7684\u7ef4\u5ea6<\/code><\/pre>\n\n\n\n<p>\u6570\u636e\u96c6\u548c\u6a21\u578b\u7684\u51c6\u5907<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>import gc\n\n# preprocess data\ntrain_X, train_y = preprocess_data(split='train', feat_dir='.\/libriphone\/feat', phone_path='.\/libriphone', concat_nframes=concat_nframes, train_ratio=train_ratio)\nval_X, val_y = preprocess_data(split='val', feat_dir='.\/libriphone\/feat', phone_path='.\/libriphone', concat_nframes=concat_nframes, train_ratio=train_ratio)\n\n# get dataset\ntrain_set = LibriDataset(train_X, train_y)\nval_set = LibriDataset(val_X, val_y)\n\n# remove raw feature to save memory\ndel train_X, train_y, val_X, val_y\ngc.collect()\n\n# get dataloader\ntrain_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\nval_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)<\/code><\/pre>\n\n\n\n<p>[Dataset] &#8211; # phone classes: 41, number of utterances for train: 3428 3428it [00:05, 639.17it\/s] [INFO] train set torch.Size([2116368, 39]) torch.Size([2116368]) [Dataset] &#8211; # phone classes: 41, number of utterances for val: 858 858it [00:00, 1011.41it\/s] [INFO] val set torch.Size([527790, 39]) torch.Size([527790])<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>device = 
'cuda:0' if torch.cuda.is_available() else 'cpu'\nprint(f'DEVICE: {device}')<\/code><\/pre>\n\n\n\n<p>DEVICE: cpu<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>import numpy as np\n\n#fix seed\ndef same_seeds(seed):\n    torch.manual_seed(seed)\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)  \n    np.random.seed(seed)  \n    torch.backends.cudnn.benchmark = False\n    torch.backends.cudnn.deterministic = True<\/code><\/pre>\n\n\n\n<pre class=\"wp-block-code\"><code># fix random seed\nsame_seeds(seed)\n\n# create model, define a loss function, and optimizer\nmodel = Classifier(input_dim=input_dim, hidden_layers=hidden_layers, hidden_dim=hidden_dim).to(device)\ncriterion = nn.CrossEntropyLoss() \noptimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)<\/code><\/pre>\n\n\n\n<p>\u8bad\u7ec3<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>best_acc = 0.0\nfor epoch in range(num_epoch):\n    train_acc = 0.0\n    train_loss = 0.0\n    val_acc = 0.0\n    val_loss = 0.0\n    \n    # training\n    model.train() # set the model to training mode\n    for i, batch in enumerate(tqdm(train_loader)):\n        features, labels = batch\n        features = features.to(device)\n        labels = labels.to(device)\n        \n        optimizer.zero_grad() \n        outputs = model(features) \n        \n        loss = criterion(outputs, labels)\n        loss.backward() \n        optimizer.step() \n        \n        _, train_pred = torch.max(outputs, 1) # get the index of the class with the highest probability\n        train_acc += (train_pred.detach() == labels.detach()).sum().item()\n        train_loss += loss.item()\n    \n    # validation\n    if len(val_set) &gt; 0:\n        model.eval() # set the model to evaluation mode\n        with torch.no_grad():\n            for i, batch in enumerate(tqdm(val_loader)):\n                features, labels = batch\n                features = features.to(device)\n          
      labels = labels.to(device)\n                outputs = model(features)\n                \n                loss = criterion(outputs, labels) \n                \n                _, val_pred = torch.max(outputs, 1) \n                val_acc += (val_pred.cpu() == labels.cpu()).sum().item() # get the index of the class with the highest probability\n                val_loss += loss.item()\n\n            print('&#91;{:03d}\/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f} | Val Acc: {:3.6f} loss: {:3.6f}'.format(\n                epoch + 1, num_epoch, train_acc\/len(train_set), train_loss\/len(train_loader), val_acc\/len(val_set), val_loss\/len(val_loader)\n            ))\n\n            # if the model improves, save a checkpoint at this epoch\n            if val_acc &gt; best_acc:\n                best_acc = val_acc\n                torch.save(model.state_dict(), model_path)\n                print('saving model with acc {:.3f}'.format(best_acc\/len(val_set)))\n    else:\n        print('&#91;{:03d}\/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f}'.format(\n            epoch + 1, num_epoch, train_acc\/len(train_set), train_loss\/len(train_loader)\n        ))\n\n# if not validating, save the last epoch\nif len(val_set) == 0:\n    torch.save(model.state_dict(), model_path)\n    print('saving model at last epoch')\n<\/code><\/pre>\n\n\n\n<p>100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.28it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.31it\/s] [001\/100] Train Acc: 0.283196 Loss: 2.956003 | Val Acc: 0.365916 loss: 2.434805 saving model with acc 0.366 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:59&lt;00:00, 35.03it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.24it\/s] [002\/100] Train Acc: 0.390979 Loss: 2.268735 | Val Acc: 0.403733 loss: 2.182312 saving model with acc 0.404 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 36.05it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 68.30it\/s] [003\/100] Train Acc: 0.414018 Loss: 2.131366 | Val Acc: 0.416668 loss: 2.111082 saving model with acc 0.417 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 36.26it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.16it\/s] [004\/100] Train Acc: 0.422077 Loss: 2.084401 | Val Acc: 0.421995 loss: 2.078780 saving model with acc 0.422 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.35it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 63.79it\/s] [005\/100] Train Acc: 0.426466 Loss: 2.058605 | Val Acc: 0.425544 loss: 2.057722 saving model with acc 0.426 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.75it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 63.39it\/s] [006\/100] Train Acc: 0.429670 Loss: 2.040123 | Val Acc: 0.428562 loss: 2.041594 saving model with acc 0.429 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.52it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.07it\/s] [007\/100] Train Acc: 0.432267 Loss: 2.025459 | Val Acc: 0.430707 loss: 2.028616 saving model with acc 0.431 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 37.74it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.19it\/s] [008\/100] Train Acc: 0.434388 Loss: 2.013230 | Val Acc: 0.432638 loss: 2.017498 saving model with acc 0.433 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 
2067\/2067 [00:55&lt;00:00, 37.58it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.99it\/s] [009\/100] Train Acc: 0.436352 Loss: 2.002708 | Val Acc: 0.434404 loss: 2.008038 saving model with acc 0.434 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 36.95it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 65.30it\/s] [010\/100] Train Acc: 0.437990 Loss: 1.993568 | Val Acc: 0.435757 loss: 1.999589 saving model with acc 0.436 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.84it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 66.84it\/s] [011\/100] Train Acc: 0.439461 Loss: 1.985492 | Val Acc: 0.437386 loss: 1.992147 saving model with acc 0.437 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.74it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 67.37it\/s] [012\/100] Train Acc: 0.440845 Loss: 1.978278 | Val Acc: 0.438602 loss: 1.985367 saving model with acc 0.439 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.79it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 66.08it\/s] [013\/100] Train Acc: 0.442134 Loss: 1.971737 | Val Acc: 0.439753 loss: 1.979366 saving model with acc 0.440 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 36.13it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 62.10it\/s] [014\/100] Train Acc: 0.443168 Loss: 1.965769 | Val Acc: 0.440897 loss: 1.973897 saving model with acc 0.441 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.24it\/s] 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.37it\/s] [015\/100] Train Acc: 0.444228 Loss: 1.960339 | Val Acc: 0.442062 loss: 1.968790 saving model with acc 0.442 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.19it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.49it\/s] [016\/100] Train Acc: 0.445204 Loss: 1.955314 | Val Acc: 0.442691 loss: 1.964092 saving model with acc 0.443 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.96it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 64.31it\/s] [017\/100] Train Acc: 0.446156 Loss: 1.950657 | Val Acc: 0.443824 loss: 1.959666 saving model with acc 0.444 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.73it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 69.93it\/s] [018\/100] Train Acc: 0.447040 Loss: 1.946370 | Val Acc: 0.444512 loss: 1.955814 saving model with acc 0.445 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.06it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 68.65it\/s] [019\/100] Train Acc: 0.447772 Loss: 1.942352 | Val Acc: 0.445287 loss: 1.952020 saving model with acc 0.445 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.79it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 72.17it\/s] [020\/100] Train Acc: 0.448488 Loss: 1.938631 | Val Acc: 0.446359 loss: 1.948506 saving model with acc 0.446 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.60it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 
516\/516 [00:07&lt;00:00, 70.33it\/s] [021\/100] Train Acc: 0.449211 Loss: 1.935159 | Val Acc: 0.446913 loss: 1.945285 saving model with acc 0.447 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.17it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 70.42it\/s] [022\/100] Train Acc: 0.449759 Loss: 1.931951 | Val Acc: 0.447633 loss: 1.942207 saving model with acc 0.448 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.31it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 71.23it\/s] [023\/100] Train Acc: 0.450402 Loss: 1.928946 | Val Acc: 0.448078 loss: 1.939327 saving model with acc 0.448 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.33it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:06&lt;00:00, 74.72it\/s] [024\/100] Train Acc: 0.451121 Loss: 1.926114 | Val Acc: 0.448444 loss: 1.936836 saving model with acc 0.448 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 36.16it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 64.49it\/s] [025\/100] Train Acc: 0.451586 Loss: 1.923470 | Val Acc: 0.449050 loss: 1.934270 saving model with acc 0.449 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.55it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.99it\/s] [026\/100] Train Acc: 0.452014 Loss: 1.920975 | Val Acc: 0.449929 loss: 1.932017 saving model with acc 0.450 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.01it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.82it\/s] [027\/100] Train Acc: 0.452486 
Loss: 1.918612 | Val Acc: 0.449955 loss: 1.929963 saving model with acc 0.450 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.88it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.68it\/s] [028\/100] Train Acc: 0.453079 Loss: 1.916388 | Val Acc: 0.450687 loss: 1.927791 saving model with acc 0.451 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.28it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 62.70it\/s] [029\/100] Train Acc: 0.453480 Loss: 1.914284 | Val Acc: 0.451075 loss: 1.925931 saving model with acc 0.451 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.54it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 70.86it\/s] [030\/100] Train Acc: 0.453852 Loss: 1.912291 | Val Acc: 0.451348 loss: 1.924096 saving model with acc 0.451 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.39it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 70.75it\/s] [031\/100] Train Acc: 0.454214 Loss: 1.910424 | Val Acc: 0.451727 loss: 1.922332 saving model with acc 0.452 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 36.22it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 70.82it\/s] [032\/100] Train Acc: 0.454616 Loss: 1.908637 | Val Acc: 0.452117 loss: 1.920980 saving model with acc 0.452 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.20it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.16it\/s] [033\/100] Train Acc: 0.455022 Loss: 1.906924 | Val Acc: 0.452258 loss: 1.919316 saving model with acc 
0.452 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.86it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.98it\/s] [034\/100] Train Acc: 0.455261 Loss: 1.905280 | Val Acc: 0.452386 loss: 1.917809 saving model with acc 0.452 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 37.74it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 63.56it\/s] [035\/100] Train Acc: 0.455582 Loss: 1.903720 | Val Acc: 0.452962 loss: 1.916526 saving model with acc 0.453 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.49it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 64.87it\/s] [036\/100] Train Acc: 0.455911 Loss: 1.902194 | Val Acc: 0.453360 loss: 1.914861 saving model with acc 0.453 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.82it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.37it\/s] [037\/100] Train Acc: 0.456176 Loss: 1.900760 | Val Acc: 0.453663 loss: 1.913680 saving model with acc 0.454 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.82it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 67.54it\/s] [038\/100] Train Acc: 0.456470 Loss: 1.899388 | Val Acc: 0.453826 loss: 1.912437 saving model with acc 0.454 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.05it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:06&lt;00:00, 74.22it\/s] [039\/100] Train Acc: 0.456719 Loss: 1.898045 | Val Acc: 0.454139 loss: 1.910982 saving model with acc 0.454 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 
2067\/2067 [00:55&lt;00:00, 37.15it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 63.24it\/s] [040\/100] Train Acc: 0.456987 Loss: 1.896759 | Val Acc: 0.454131 loss: 1.909964 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.40it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 72.81it\/s] [041\/100] Train Acc: 0.457293 Loss: 1.895483 | Val Acc: 0.454453 loss: 1.909007 saving model with acc 0.454 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.36it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 73.40it\/s] [042\/100] Train Acc: 0.457459 Loss: 1.894299 | Val Acc: 0.454556 loss: 1.908012 saving model with acc 0.455 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.28it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 71.28it\/s] [043\/100] Train Acc: 0.457703 Loss: 1.893125 | Val Acc: 0.454916 loss: 1.906775 saving model with acc 0.455 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.33it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.18it\/s] [044\/100] Train Acc: 0.457984 Loss: 1.891975 | Val Acc: 0.455060 loss: 1.905893 saving model with acc 0.455 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.34it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.00it\/s] [045\/100] Train Acc: 0.458029 Loss: 1.890876 | Val Acc: 0.455124 loss: 1.904940 saving model with acc 0.455 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.01it\/s] 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.60it\/s] [046\/100] Train Acc: 0.458300 Loss: 1.889813 | Val Acc: 0.455389 loss: 1.903937 saving model with acc 0.455 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.10it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.34it\/s] [047\/100] Train Acc: 0.458575 Loss: 1.888764 | Val Acc: 0.455431 loss: 1.902961 saving model with acc 0.455 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:01&lt;00:00, 33.53it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 63.54it\/s] [048\/100] Train Acc: 0.458721 Loss: 1.887728 | Val Acc: 0.455816 loss: 1.902145 saving model with acc 0.456 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:59&lt;00:00, 34.81it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 57.50it\/s] [049\/100] Train Acc: 0.458985 Loss: 1.886740 | Val Acc: 0.455757 loss: 1.901472 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.38it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:09&lt;00:00, 56.04it\/s] [050\/100] Train Acc: 0.459081 Loss: 1.885782 | Val Acc: 0.455651 loss: 1.900525 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:09&lt;00:00, 29.57it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:09&lt;00:00, 52.86it\/s] [051\/100] Train Acc: 0.459381 Loss: 1.884825 | Val Acc: 0.456032 loss: 1.899771 saving model with acc 0.456 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:05&lt;00:00, 31.64it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:09&lt;00:00, 54.95it\/s] [052\/100] Train Acc: 
0.459510 Loss: 1.883918 | Val Acc: 0.456098 loss: 1.898842 saving model with acc 0.456 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:07&lt;00:00, 30.80it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:09&lt;00:00, 54.03it\/s] [053\/100] Train Acc: 0.459717 Loss: 1.883032 | Val Acc: 0.456259 loss: 1.898125 saving model with acc 0.456 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.11it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:10&lt;00:00, 51.11it\/s] [054\/100] Train Acc: 0.459898 Loss: 1.882123 | Val Acc: 0.456358 loss: 1.897445 saving model with acc 0.456 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:01&lt;00:00, 33.62it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 58.09it\/s] [055\/100] Train Acc: 0.460116 Loss: 1.881261 | Val Acc: 0.456536 loss: 1.896433 saving model with acc 0.457 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:59&lt;00:00, 34.70it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 59.87it\/s] [056\/100] Train Acc: 0.460199 Loss: 1.880439 | Val Acc: 0.456492 loss: 1.895816 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 37.72it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.60it\/s] [057\/100] Train Acc: 0.460341 Loss: 1.879603 | Val Acc: 0.456759 loss: 1.895241 saving model with acc 0.457 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 37.96it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.79it\/s] [058\/100] Train Acc: 0.460510 Loss: 1.878778 | Val Acc: 0.457026 loss: 1.894500 saving model with acc 0.457 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 36.97it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 62.95it\/s] [059\/100] Train Acc: 0.460636 Loss: 1.877990 | Val Acc: 0.456915 loss: 1.893863 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.23it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 70.85it\/s] [060\/100] Train Acc: 0.460799 Loss: 1.877219 | Val Acc: 0.457263 loss: 1.893090 saving model with acc 0.457 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.47it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 70.51it\/s] [061\/100] Train Acc: 0.460986 Loss: 1.876462 | Val Acc: 0.457087 loss: 1.892663 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 37.86it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 65.45it\/s] [062\/100] Train Acc: 0.461094 Loss: 1.875714 | Val Acc: 0.457296 loss: 1.891906 saving model with acc 0.457 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.28it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 64.02it\/s] [063\/100] Train Acc: 0.461261 Loss: 1.874982 | Val Acc: 0.457419 loss: 1.891563 saving model with acc 0.457 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 38.00it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 65.85it\/s] [064\/100] Train Acc: 0.461345 Loss: 1.874242 | Val Acc: 0.457623 loss: 1.890625 saving model with acc 0.458 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 38.06it\/s] 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 64.10it\/s] [065\/100] Train Acc: 0.461536 Loss: 1.873522 | Val Acc: 0.457767 loss: 1.890202 saving model with acc 0.458 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:54&lt;00:00, 38.18it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 64.59it\/s] [066\/100] Train Acc: 0.461673 Loss: 1.872819 | Val Acc: 0.457881 loss: 1.889742 saving model with acc 0.458 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.42it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 66.19it\/s] [067\/100] Train Acc: 0.461787 Loss: 1.872144 | Val Acc: 0.457760 loss: 1.889154 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.73it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 68.31it\/s] [068\/100] Train Acc: 0.461906 Loss: 1.871492 | Val Acc: 0.458173 loss: 1.888489 saving model with acc 0.458 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:59&lt;00:00, 34.99it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.90it\/s] [069\/100] Train Acc: 0.462087 Loss: 1.870809 | Val Acc: 0.458332 loss: 1.887800 saving model with acc 0.458 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:12&lt;00:00, 28.46it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:09&lt;00:00, 53.86it\/s] [070\/100] Train Acc: 0.462223 Loss: 1.870163 | Val Acc: 0.458453 loss: 1.887355 saving model with acc 0.458 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:04&lt;00:00, 32.00it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:09&lt;00:00, 
53.15it\/s] [071\/100] Train Acc: 0.462292 Loss: 1.869491 | Val Acc: 0.458453 loss: 1.887015 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:00&lt;00:00, 34.15it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 58.77it\/s] [072\/100] Train Acc: 0.462522 Loss: 1.868878 | Val Acc: 0.458529 loss: 1.886238 saving model with acc 0.459 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:59&lt;00:00, 34.94it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.02it\/s] [073\/100] Train Acc: 0.462546 Loss: 1.868245 | Val Acc: 0.458944 loss: 1.885764 saving model with acc 0.459 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:59&lt;00:00, 34.98it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 57.37it\/s] [074\/100] Train Acc: 0.462739 Loss: 1.867639 | Val Acc: 0.458730 loss: 1.885303 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 36.23it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 58.49it\/s] [075\/100] Train Acc: 0.462773 Loss: 1.867054 | Val Acc: 0.458781 loss: 1.884844 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.44it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 65.41it\/s] [076\/100] Train Acc: 0.462957 Loss: 1.866452 | Val Acc: 0.458967 loss: 1.884511 saving model with acc 0.459 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.26it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 57.90it\/s] [077\/100] Train Acc: 0.463071 Loss: 1.865853 | Val Acc: 0.459239 loss: 1.883732 saving model with acc 0.459 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.58it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 58.51it\/s] [078\/100] Train Acc: 0.463167 Loss: 1.865279 | Val Acc: 0.458987 loss: 1.883295 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.34it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.42it\/s] [079\/100] Train Acc: 0.463261 Loss: 1.864719 | Val Acc: 0.459186 loss: 1.882890 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [01:00&lt;00:00, 34.38it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.18it\/s] [080\/100] Train Acc: 0.463340 Loss: 1.864145 | Val Acc: 0.459205 loss: 1.882407 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.88it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.33it\/s] [081\/100] Train Acc: 0.463554 Loss: 1.863579 | Val Acc: 0.459459 loss: 1.881937 saving model with acc 0.459 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.08it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 64.66it\/s] [082\/100] Train Acc: 0.463750 Loss: 1.863056 | Val Acc: 0.459543 loss: 1.881626 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.38it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 64.78it\/s] [083\/100] Train Acc: 0.463698 Loss: 1.862503 | Val Acc: 0.459539 loss: 1.881191 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:58&lt;00:00, 35.32it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 
[00:08&lt;00:00, 58.59it\/s] [084\/100] Train Acc: 0.463868 Loss: 1.861979 | Val Acc: 0.459512 loss: 1.880576 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.83it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.91it\/s] [085\/100] Train Acc: 0.463994 Loss: 1.861431 | Val Acc: 0.459776 loss: 1.880033 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.91it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 65.50it\/s] [086\/100] Train Acc: 0.464049 Loss: 1.860918 | Val Acc: 0.459662 loss: 1.879941 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.91it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 69.31it\/s] [087\/100] Train Acc: 0.464190 Loss: 1.860406 | Val Acc: 0.459978 loss: 1.879342 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 36.22it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 62.61it\/s] [088\/100] Train Acc: 0.464191 Loss: 1.859903 | Val Acc: 0.460050 loss: 1.879095 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.95it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.68it\/s] [089\/100] Train Acc: 0.464335 Loss: 1.859389 | Val Acc: 0.459982 loss: 1.878472 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.75it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 65.84it\/s] [090\/100] Train Acc: 0.464477 Loss: 1.858915 | Val Acc: 0.459884 loss: 1.878217 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.40it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.87it\/s] [091\/100] Train Acc: 0.464511 Loss: 1.858426 | Val Acc: 0.460247 loss: 1.877929 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.13it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 63.50it\/s] [092\/100] Train Acc: 0.464689 Loss: 1.857926 | Val Acc: 0.460113 loss: 1.877566 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:57&lt;00:00, 35.71it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 72.81it\/s] [093\/100] Train Acc: 0.464692 Loss: 1.857448 | Val Acc: 0.460380 loss: 1.876994 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.44it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 72.37it\/s] [094\/100] Train Acc: 0.464821 Loss: 1.856997 | Val Acc: 0.460418 loss: 1.876573 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.76it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:07&lt;00:00, 67.15it\/s] [095\/100] Train Acc: 0.464949 Loss: 1.856516 | Val Acc: 0.460291 loss: 1.876206 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.48it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 63.35it\/s] [096\/100] Train Acc: 0.464960 Loss: 1.856054 | Val Acc: 0.460395 loss: 1.875968 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.46it\/s] 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 59.91it\/s] [097\/100] Train Acc: 0.465175 Loss: 1.855608 | Val Acc: 0.460422 loss: 1.875492 saving model with acc 0.460 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 37.19it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 60.70it\/s] [098\/100] Train Acc: 0.465203 Loss: 1.855148 | Val Acc: 0.460865 loss: 1.875298 saving model with acc 0.461 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:56&lt;00:00, 36.84it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.92it\/s] [099\/100] Train Acc: 0.465197 Loss: 1.854699 | Val Acc: 0.460740 loss: 1.874919 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 2067\/2067 [00:55&lt;00:00, 36.99it\/s] 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 516\/516 [00:08&lt;00:00, 61.21it\/s][100\/100] Train Acc: 0.465276 Loss: 1.854267 | Val Acc: 0.460742 loss: 1.874491<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>del train_loader, val_loader\ngc.collect()<\/code><\/pre>\n\n\n\n<p>0<\/p>\n\n\n\n<p>\u6d4b\u8bd5<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code># load data\ntest_X = preprocess_data(split='test', feat_dir='.\/libriphone\/feat', phone_path='.\/libriphone', concat_nframes=concat_nframes)\ntest_set = LibriDataset(test_X, None)\ntest_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)<\/code><\/pre>\n\n\n\n<p>[Dataset] &#8211; # phone classes: 41, number of utterances for test: 1078 1078it [00:02, 398.82it\/s][INFO] test set torch.Size([646268, 39])<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code># load model\nmodel = Classifier(input_dim=input_dim, hidden_layers=hidden_layers, hidden_dim=hidden_dim).to(device)\nmodel.load_state_dict(torch.load(model_path))<\/code><\/pre>\n\n\n\n<p>&lt;All keys matched 
successfully&gt;<\/p>\n\n\n\n<p>\u9884\u6d4b<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>test_acc = 0.0\ntest_lengths = 0\npred = np.array(&#91;], dtype=np.int32)\n\nmodel.eval()\nwith torch.no_grad():\n    for i, batch in enumerate(tqdm(test_loader)):\n        features = batch\n        features = features.to(device)\n\n        outputs = model(features)\n\n        _, test_pred = torch.max(outputs, 1) # get the index of the class with the highest probability\n        pred = np.concatenate((pred, test_pred.cpu().numpy()), axis=0)<\/code><\/pre>\n\n\n\n<p>100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 632\/632 [00:05&lt;00:00, 113.55it\/s]<\/p>\n\n\n\n<p>\u5199\u5165\u6587\u4ef6<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>with open('prediction.csv', 'w') as f:\n    f.write('Id,Class\\n')\n    for i, y in enumerate(pred):\n        f.write('{},{}\\n'.format(i, y))<\/code><\/pre>\n\n\n\n<hr class=\"wp-block-separator has-alpha-channel-opacity\"\/>\n\n\n\n<p>\u4ee5\u4e0a\u5c31\u662fHW2\u7684\u57fa\u7840\u4ee3\u7801\uff0c\u7b49\u6211\u53d8\u5f3a\u4e4b\u540e\u4e00\u5b9a\u56de\u6765\uff01<\/p>\n","protected":false},"excerpt":{"rendered":"<p>\u3010HW2\u3011slides0.1\u674e\u5b8f\u6bc52021\/2022\u6625\u673a\u5668\u5b66\u4e60\u8bfe\u7a0b\u7b14\u8bb0EP7(P28-P30) \u4ece\u4eca\u5929\u5f00\u59cb\u6211\u5c06 
[&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[6],"tags":[15,3,7,9,8],"class_list":["post-278","post","type-post","status-publish","format-standard","hentry","category-lhyjqxxbj","tag-homework","tag-xxbj","tag-jjxx","tag-lhy","tag-deeplearning"],"_links":{"self":[{"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/posts\/278","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/comments?post=278"}],"version-history":[{"count":2,"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/posts\/278\/revisions"}],"predecessor-version":[{"id":1869,"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/posts\/278\/revisions\/1869"}],"wp:attachment":[{"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/media?parent=278"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/categories?post=278"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/tobykskgd.life\/index.php\/wp-json\/wp\/v2\/tags?post=278"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}