diff --git a/COMP3217.docx b/COMP3217.docx new file mode 100644 index 0000000000000000000000000000000000000000..e4bd96f44bf0065b1cc577a2cd42134a8fa508e1 Binary files /dev/null and b/COMP3217.docx differ diff --git a/part1 (2).ipynb b/part1 (2).ipynb deleted file mode 100644 index 93a16f5a426677ad43fd92407f797760c255ae26..0000000000000000000000000000000000000000 --- a/part1 (2).ipynb +++ /dev/null @@ -1,537 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "import numpy as np\n", - "import sklearn\n", - "import scipy\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.metrics import accuracy_score\n", - "from sklearn.preprocessing import StandardScaler\n", - "from sklearn.linear_model import LogisticRegression\n", - "import matplotlib.pyplot as plt\n", - "from sklearn.decomposition import PCA\n", - "from sklearn.impute import SimpleImputer\n", - "from sklearn.model_selection import GridSearchCV\n", - "import numpy as np\n", - "import pandas as pd\n", - "from sklearn.preprocessing import StandardScaler\n", - "from sklearn.impute import SimpleImputer\n", - "from sklearn.decomposition import PCA\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.linear_model import LogisticRegression\n", - "from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n", - "from sklearn.model_selection import GridSearchCV\n", - "import pandas as pd\n", - "from sklearn.impute import SimpleImputer\n", - "from sklearn.model_selection import RandomizedSearchCV\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "#Read CSV file as Pandas Dataframe\n", - "train_df = pd.read_csv('TrainingDataBinary.csv')" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "<class 'pandas.core.frame.DataFrame'>\n", - "RangeIndex: 6000 entries, 0 to 5999\n", - "Columns: 129 entries, 1 to 129\n", - "dtypes: float64(112), int64(17)\n", - "memory usage: 5.9 MB\n", - "None\n" - ] - } - ], - "source": [ - "print(train_df.info())" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(array([3000., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 3000.]),\n", - " array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. 
]),\n", - " <BarContainer object of 10 artists>)" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAA... [base64-encoded PNG of the histogram figure omitted]", - "text/plain": [ - "<Figure size 640x480 with 1 Axes>" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Create a histogram to show the distribution of a column\n", - "plt.hist(train_df['129'])" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "scaler = StandardScaler()\n", - "\n", - "# Separate the features from the target variable\n", - "X = train_df.drop('129', axis=1)\n", - "y = train_df['129']\n", - "\n", - "#Fix infinite value error\n", - "# X[X == np.inf] = np.finfo('float64').max\n", - "X.replace([np.inf,-np.inf],0,inplace=True)\n", - "\n", - "# Create a SimpleImputer object to replace NaN values with the mean value of the corresponding column\n", - "imputer = SimpleImputer(strategy='mean')\n",
- "\n", - "# Impute the missing values in the features data\n", - "X_imputed = imputer.fit_transform(X)\n", - "\n", - "# Fit the scaler to the features data and transform the data\n", - "X_scaled = scaler.fit_transform(X_imputed)\n", - "\n", - "# # The transformed data will be a numpy array, so you can convert it back to a DataFrame\n", - "# X_scaled_df = pd.DataFrame(X_scaled, columns=X.columns)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "#PCA\n", - "pca = PCA(n_components=100)\n", - "X_pca = pca.fit_transform(X_scaled)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Accuracy: 0.895\n", - "Classification Report:\n", - " precision recall f1-score support\n", - "\n", - " 0 0.86 0.94 0.90 588\n", - " 1 0.93 0.86 0.89 612\n", - "\n", - " accuracy 0.90 1200\n", - " macro avg 0.90 0.90 0.89 1200\n", - "weighted avg 0.90 0.90 0.89 1200\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n" - ] - } - ], - "source": [ - "#split data\n", - "X_train, X_test, y_train, y_test = train_test_split(X_pca, y, test_size=0.2, random_state=42)\n", - "\n", - "#train the model\n", - "log_reg = LogisticRegression()\n", - "log_reg.fit(X_train, y_train)\n", - "\n", - "# 5. 
Evaluate the model on the testing set\n", - "y_pred = log_reg.predict(X_test)\n", - "accuracy = accuracy_score(y_test, y_pred)\n", - "\n", - "report = classification_report(y_test, y_pred)\n", - "\n", - "print(\"Accuracy:\", accuracy)\n", - "\n", - "print(\"Classification Report:\\n\", report)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Confusion Matrix:\n", - " [[550 38]\n", - " [ 88 524]]\n" - ] - } - ], - "source": [ - "conf_matrix = confusion_matrix(y_test, y_pred)\n", - "print(\"Confusion Matrix:\\n\", conf_matrix)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "[[True Negatives (TN), False Positives (FP)],\n", - " [False Negatives (FN), True Positives (TP)]]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Fine tuning" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fitting 3 folds for each of 100 candidates, totalling 300 fits\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py:378: FitFailedWarning: \n", - "120 fits failed out of a total of 300.\n", - "The score on these train-test partitions for these parameters will be set to nan.\n", - "If these failures are not expected, you can try to debug them by setting error_score='raise'.\n", - "\n", - "Below are more details about the failures:\n", - "--------------------------------------------------------------------------------\n", - "24 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 54, in _check_solver\n", - " raise ValueError(\n", - "ValueError: Solver newton-cg supports only 'l2' or 'none' penalties, got elasticnet penalty.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "15 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 71, in _check_solver\n", - " raise ValueError(\"penalty='none' is not supported for the liblinear solver\")\n", - "ValueError: penalty='none' is not supported for the liblinear solver\n", 
- "\n", - "--------------------------------------------------------------------------------\n", - "18 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 54, in _check_solver\n", - " raise ValueError(\n", - "ValueError: Solver lbfgs supports only 'l2' or 'none' penalties, got l1 penalty.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "6 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1291, in fit\n", - " fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\utils\\parallel.py\", line 63, in __call__\n", - " return super().__call__(iterable_with_config)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\joblib\\parallel.py\", line 1085, in __call__\n", - " if self.dispatch_one_batch(iterator):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\joblib\\parallel.py\", line 901, in dispatch_one_batch\n", - " self._dispatch(tasks)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\joblib\\parallel.py\", line 819, in _dispatch\n", - " job = self._backend.apply_async(batch, callback=cb)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\joblib\\_parallel_backends.py\", line 208, in apply_async\n", - " result = ImmediateResult(func)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\joblib\\_parallel_backends.py\", line 597, in __init__\n", - " self.results = batch()\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\joblib\\parallel.py\", line 288, in __call__\n", - " return [func(*args, **kwargs)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\joblib\\parallel.py\", line 288, in <listcomp>\n", - " return [func(*args, **kwargs)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\utils\\parallel.py\", line 123, in __call__\n", - " return self.function(*args, **kwargs)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 521, in _logistic_regression_path\n", - " alpha = (1.0 / C) * (1 - l1_ratio)\n", - "TypeError: unsupported operand type(s) for -: 'int' and 'NoneType'\n", - 
"\n", - "--------------------------------------------------------------------------------\n", - "21 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 64, in _check_solver\n", - " raise ValueError(\n", - "ValueError: Only 'saga' solver supports elasticnet penalty, got solver=liblinear.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "12 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 54, in _check_solver\n", - " raise ValueError(\n", - "ValueError: Solver sag supports only 'l2' or 'none' penalties, got l1 penalty.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "9 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 54, in _check_solver\n", - " raise ValueError(\n", - "ValueError: Solver sag supports only 'l2' or 'none' penalties, got elasticnet penalty.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "6 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 54, in _check_solver\n", - " raise ValueError(\n", - "ValueError: Solver 
lbfgs supports only 'l2' or 'none' penalties, got elasticnet penalty.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "9 fits failed with the following error:\n", - "Traceback (most recent call last):\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", - " estimator.fit(X_train, y_train, **fit_params)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", - " solver = _check_solver(self.solver, self.penalty, self.dual)\n", - " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 54, in _check_solver\n", - " raise ValueError(\n", - "ValueError: Solver newton-cg supports only 'l2' or 'none' penalties, got l1 penalty.\n", - "\n", - " warnings.warn(some_fits_failed_message, FitFailedWarning)\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_search.py:952: UserWarning: One or more of the test scores are non-finite: [ nan 0.87666667 0.92083333 nan nan 0.87416667\n", - " nan 0.87666667 0.864375 0.87645833 0.78208333 0.87854167\n", - " 0.72333333 0.87854167 0.85645833 nan nan nan\n", - " 0.85083333 0.72333333 0.5025 0.92020833 0.78208333 0.918125\n", - " 0.86458333 0.87666667 nan 0.9225 0.90375 nan\n", - " 0.78208333 nan 0.5025 nan nan nan\n", - " nan 0.78208333 nan 0.78208333 0.85645833 0.628125\n", - " 0.918125 nan 0.49916667 0.85875 nan 0.49916667\n", - " nan nan 0.87791667 0.86520833 nan 0.9225\n", - " nan 0.918125 0.865625 0.84166667 nan 0.9225\n", - " 0.90375 0.918125 0.87375 0.918125 0.864375 nan\n", - " nan 0.87666667 nan 0.90375 0.85625 0.62895833\n", - " nan nan 0.85625 nan nan 0.87854167\n", - " 0.85645833 nan 0.87791667 0.90395833 0.87854167 nan\n", - " nan 0.87375 0.78208333 0.87666667 nan nan\n", - " 0.78208333 0.90270833 nan nan 0.85625 nan\n", - " 0.86583333 nan nan nan]\n", - " warnings.warn(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:1173: FutureWarning: `penalty='none'`has been deprecated in 1.2 and will be removed in 1.4. To keep the past behaviour, set `penalty=None`.\n", - " warnings.warn(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:1181: UserWarning: Setting penalty=None will ignore the C and l1_ratio parameters\n", - " warnings.warn(\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Best Parameters: {'solver': 'newton-cg', 'penalty': 'none', 'max_iter': 100, 'C': 0.005994842503189409}\n", - "Best Score: 0.9225\n", - "Accuracy: 0.9325\n", - "Confusion Matrix:\n", - " [[557 31]\n", - " [ 50 562]]\n", - "Classification Report:\n", - " precision recall f1-score support\n", - "\n", - " 0 0.92 0.95 0.93 588\n", - " 1 0.95 0.92 0.93 612\n", - "\n", - " accuracy 0.93 1200\n", - " macro avg 0.93 0.93 0.93 1200\n", - "weighted avg 0.93 0.93 0.93 1200\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\utils\\optimize.py:210: ConvergenceWarning: newton-cg failed to converge. 
Increase the number of iterations.\n", - " warnings.warn(\n" - ] - } - ], - "source": [ - "\n", - "param_dist = {\n", - " 'penalty': ['l1', 'l2', 'elasticnet', 'none'],\n", - " 'C': np.logspace(-4, 4, 10),\n", - " 'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],\n", - " 'max_iter': [100, 500, 1000],\n", - "}\n", - "\n", - "# Create the RandomizedSearchCV object with the logistic regression model, hyperparameters, and cross-validation\n", - "log_reg = LogisticRegression()\n", - "random_search = RandomizedSearchCV(log_reg, param_dist, n_iter=100, cv=3, n_jobs=-1, verbose=1, random_state=42)\n", - "\n", - "# Fit the random search to the training data\n", - "random_search.fit(X_train, y_train)\n", - "\n", - "# Check the best hyperparameters found\n", - "print(\"Best Parameters:\", random_search.best_params_)\n", - "print(\"Best Score:\", random_search.best_score_)\n", - "\n", - "# Use the best estimator for predictions and evaluation\n", - "best_model = random_search.best_estimator_\n", - "y_pred = best_model.predict(X_test)\n", - "accuracy = accuracy_score(y_test, y_pred)\n", - "conf_matrix = confusion_matrix(y_test, y_pred)\n", - "report = classification_report(y_test, y_pred)\n", - "\n", - "print(\"Accuracy:\", accuracy)\n", - "print(\"Confusion Matrix:\\n\", conf_matrix)\n", - "print(\"Classification Report:\\n\", report)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predict" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "test_data = pd.read_csv('TestingDataBinary.csv')" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "# Preprocessing\n", - "X_new = test_data\n", - "X_new.replace([np.inf, -np.inf], 0, inplace=True)\n", - "\n", - "# Impute the missing values in the features data\n", - "X_imputed_new = imputer.transform(X_new)\n", - "\n", - "# Scale the features data\n", - "X_scaled_new = scaler.transform(X_imputed_new)\n", - "\n", - "# Apply PCA transformation\n", - "X_pca_new = pca.transform(X_scaled_new)" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "# Use the best estimator for predictions on the new data\n", - "y_pred_new = best_model.predict(X_pca_new)\n", - "\n", - "# Save the predictions to a new column in the DataFrame\n", - "test_data['predicted_marker'] = y_pred_new\n", - "\n", - "# Save the updated DataFrame to a new CSV file\n", - "test_data.to_csv('TestingDataBinary_with_predictions.csv', index=False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.0" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/part1.ipynb b/part1.ipynb index 0824cedf423c3bb0134c30884073a94782bd0dbe..4d6c92eebc90ec3d5a30f371405689f5c4a699bc 100644 --- a/part1.ipynb +++ b/part1.ipynb @@ -17,7 +17,19 @@ "import matplotlib.pyplot as plt\n", "from sklearn.decomposition import PCA\n", "from sklearn.impute import SimpleImputer\n", - "from sklearn.model_selection import GridSearchCV" + "from sklearn.model_selection import GridSearchCV\n", + "import numpy as np\n", + "import pandas as pd\n", + "from 
sklearn.preprocessing import StandardScaler\n", + "from sklearn.impute import SimpleImputer\n", + "from sklearn.decomposition import PCA\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.linear_model import LogisticRegression\n", + "from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n", + "from sklearn.model_selection import GridSearchCV\n", + "import pandas as pd\n", + "from sklearn.impute import SimpleImputer\n", + "from sklearn.model_selection import RandomizedSearchCV\n" ] }, { @@ -34,6 +46,28 @@ "cell_type": "code", "execution_count": 3, "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "<class 'pandas.core.frame.DataFrame'>\n", + "RangeIndex: 6000 entries, 0 to 5999\n", + "Columns: 129 entries, 1 to 129\n", + "dtypes: float64(112), int64(17)\n", + "memory usage: 5.9 MB\n", + "None\n" + ] + } + ], + "source": [ + "print(train_df.info())" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, "outputs": [ { "data": { @@ -44,7 +78,7 @@ " <BarContainer object of 10 artists>)" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" }, @@ -61,7 +95,7 @@ ], "source": [ "# Create a histogram to show the distribution of a column\n", - "plt.hist(train_df['marker'])" + "plt.hist(train_df['129'])" ] }, { @@ -73,11 +107,10 @@ "scaler = StandardScaler()\n", "\n", "# Separate the features from the target variable\n", - "X = train_df.drop('marker', axis=1)\n", - "y = train_df['marker']\n", + "X = train_df.drop('129', axis=1)\n", + "y = train_df['129']\n", "\n", "#Fix infinite value error\n", - "# X[X == np.inf] = np.finfo('float64').max\n", "X.replace([np.inf,-np.inf],0,inplace=True)\n", "\n", "# Create a SimpleImputer object to replace NaN values with the mean value of the corresponding column\n", @@ -95,101 +128,106 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "n_components = 100\n", - "pca = PCA(n_components=n_components)\n", - "principal_components = pca.fit_transform(X_scaled)\n", - "\n", - "# Create a DataFrame with the loadings\n", - "loadings = pd.DataFrame(pca.components_.T, columns=[f'PC{i+1}' for i in range(n_components)], index=X.columns)\n", - "\n", - "# Apply PCA to the scaled data\n", - "# pca = PCA(n_components=100)\n", - "# X_pca = pca.fit_transform(X_scaled)\n", - "\n", - "# Split the data into training and testing sets\n", - "X_train, X_test, y_train, y_test = train_test_split(pca, y, test_size=0.2,random_state=42)\n", - "\n", - "# # Train the model on the training data\n", - "# lr.fit(X_train, y_train)\n", - "\n", - "# # Predict the labels for the test data\n", - "# y_pred = lr.predict(X_test)\n", - "\n", - "# # Evaluate the model performance\n", - "# print(\"Accuracy:\", accuracy_score(y_test, y_pred))" + "#PCA\n", + "pca = PCA(n_components=100)\n", + "X_pca = pca.fit_transform(X_scaled)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy: 0.895\n", + "Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.86 0.94 0.90 588\n", + " 1 0.93 0.86 0.89 612\n", + "\n", + " accuracy 0.90 1200\n", + " macro avg 0.90 0.90 0.89 1200\n", + "weighted avg 0.90 0.90 0.89 1200\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", + "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", + "\n", + "Increase the number of iterations (max_iter) or scale the data as shown in:\n", + " https://scikit-learn.org/stable/modules/preprocessing.html\n", + "Please also refer to the documentation for alternative solver options:\n", + " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", + " n_iter_i = _check_optimize_result(\n" + ] + } + ], "source": [ - "X_test_pca = pca.transform(X_test_scaled)\n", - "clf = LogisticRegression(random_state=42)\n", - "clf.fit(X_train_pca, y_train)\n", - "\n", + "#split data\n", + "X_train, X_test, y_train, y_test = train_test_split(X_pca, y, test_size=0.2, random_state=42)\n", "\n", - "y_pred = clf.predict(X_test_pca)\n", + "#train the model\n", + "log_reg = LogisticRegression()\n", + "log_reg.fit(X_train, y_train)\n", "\n", - "# Calculate and print the accuracy of the model\n", + "# 5. Evaluate the model on the testing set\n", + "y_pred = log_reg.predict(X_test)\n", "accuracy = accuracy_score(y_test, y_pred)\n", - "print(\"Accuracy:\", accuracy)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "# Read the test dataset\n", - "test_df = pd.read_csv('TestingDataBinary.csv')" + "\n", + "report = classification_report(y_test, y_pred)\n", + "\n", + "print(\"Accuracy:\", accuracy)\n", + "\n", + "print(\"Classification Report:\\n\", report)" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Top 100 components:\n", - " [[ 3.72196354e+00 -5.87941588e+00 -4.02934784e-01 ... 4.15787367e-03\n", - " 1.89567282e-03 2.81043971e-03]\n", - " [ 1.25401316e+00 -5.82245182e+00 -7.51607953e-01 ... 5.71178351e-04\n", - " 1.64284342e-04 3.97691294e-03]\n", - " [ 1.24713154e+00 -5.82164239e+00 -7.59379345e-01 ... 3.40089202e-03\n", - " 2.59366304e-04 4.28360451e-03]\n", - " ...\n", - " [-6.89160079e-01 -5.50909843e+00 -4.69952506e-01 ... -2.71254494e-03\n", - " -9.03351989e-05 -2.02581895e-03]\n", - " [ 7.34703326e-01 -5.58643030e+00 -5.41845944e-01 ... -3.62008786e-03\n", - " -8.72999728e-05 -2.60358277e-03]\n", - " [ 7.35621169e-01 -5.58380312e+00 -5.36559421e-01 ... 
-3.46823833e-03\n", - " 3.30081328e-04 -2.83803266e-03]]\n" + "Confusion Matrix:\n", + " [[550 38]\n", + " [ 88 524]]\n" ] } ], "source": [ - "# explained_variance_ratio = pca.explained_variance_ratio_\n", - "\n", - "\n", - "# sorted_indices = np.argsort(explained_variance_ratio)[::-1]\n", - "\n", - "# # Get the top 100 components\n", - "# top_100_indices = sorted_indices[:100]\n", - "# top_100_components = principal_components[:, top_100_indices]\n", - "# top_100_explained_variance_ratio = explained_variance_ratio[top_100_indices]\n", - "\n", - "\n", - "# print(\"Top 100 components:\\n\", top_100_components)" + "conf_matrix = confusion_matrix(y_test, y_pred)\n", + "print(\"Confusion Matrix:\\n\", conf_matrix)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[[True Negatives (TN), False Positives (FP)],\n", + " [False Negatives (FN), True Positives (TP)]]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Fine tuning" ] }, { @@ -197,266 +235,279 @@ "execution_count": 9, "metadata": {}, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fitting 3 folds for each of 100 candidates, totalling 300 fits\n" + ] + }, { "name": "stderr", "output_type": "stream", "text": [ - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", + "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py:378: FitFailedWarning: \n", + "120 fits failed out of a total of 300.\n", + "The score on these train-test partitions for these parameters will be set to nan.\n", + "If these failures are not expected, you can try to debug them by setting error_score='raise'.\n", "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n", + "Below are more details about the failures:\n", + "--------------------------------------------------------------------------------\n", + "24 fits failed with the following error:\n", + "Traceback (most recent call last):\n", + " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", + " estimator.fit(X_train, y_train, **fit_params)\n", + " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", + " solver = _check_solver(self.solver, self.penalty, self.dual)\n", + " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 54, in _check_solver\n", + " raise ValueError(\n", + "ValueError: Solver newton-cg supports only 'l2' or 'none' penalties, got elasticnet penalty.\n", "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", + "--------------------------------------------------------------------------------\n", + "15 fits failed with the following error:\n", + "Traceback (most recent call last):\n", + " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\model_selection\\_validation.py\", line 686, in _fit_and_score\n", + " estimator.fit(X_train, y_train, **fit_params)\n", + " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 1162, in fit\n", + " solver = _check_solver(self.solver, self.penalty, self.dual)\n", + " File \"c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py\", line 71, in _check_solver\n", + " raise ValueError(\"penalty='none' is not supported for the liblinear solver\")\n", + "ValueError: penalty='none' is not supported for the liblinear solver\n", "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n",
- "\n",
- "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
- "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
- "Please also refer to the documentation for alternative solver options:\n",
- "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
- "  n_iter_i = _check_optimize_result(\n",
- "[the lbfgs ConvergenceWarning above, and a matching 'Liblinear failed to converge, increase the number of iterations.' warning from svm\\_base.py:1244, repeat verbatim for the remaining cross-validation fits]\n",
+ "--------------------------------------------------------------------------------\n",
+ "[FitFailedWarning: the random search sampled solver/penalty combinations that LogisticRegression rejects; each group below failed with the error shown]\n",
+ "18 fits: ValueError: Solver lbfgs supports only 'l2' or 'none' penalties, got l1 penalty.\n",
+ "6 fits:  ValueError: Solver lbfgs supports only 'l2' or 'none' penalties, got elasticnet penalty.\n",
+ "9 fits:  ValueError: Solver newton-cg supports only 'l2' or 'none' penalties, got l1 penalty.\n",
+ "12 fits: ValueError: Solver sag supports only 'l2' or 'none' penalties, got l1 penalty.\n",
+ "9 fits:  ValueError: Solver sag supports only 'l2' or 'none' penalties, got elasticnet penalty.\n",
+ "21 fits: ValueError: Only 'saga' solver supports elasticnet penalty, got solver=liblinear.\n",
+ "6 fits:  TypeError: unsupported operand type(s) for -: 'int' and 'NoneType' (from alpha = (1.0 / C) * (1 - l1_ratio) when 'elasticnet' is sampled with l1_ratio=None)\n",
+ "  warnings.warn(some_fits_failed_message, FitFailedWarning)\n",
+ "model_selection\\_search.py:952: UserWarning: One or more of the test scores are non-finite: the failed fits score as nan and the finite scores peak at 0.9225\n",
+ "linear_model\\_logistic.py:1173: FutureWarning: `penalty='none'` has been deprecated in 1.2 and will be removed in 1.4. To keep the past behaviour, set `penalty=None`.\n",
+ "linear_model\\_logistic.py:1181: UserWarning: Setting penalty=None will ignore the C and l1_ratio parameters\n",
+ "  warnings.warn(\n",
- "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
- "STOP: TOTAL NO.
of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n", - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - " n_iter_i = _check_optimize_result(\n" + "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:1181: UserWarning: Setting penalty=None will ignore the C and l1_ratio parameters\n", + " warnings.warn(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Best hyperparameters: {'C': 100, 'solver': 'liblinear'}\n", - "Best accuracy score: 0.8968333333333334\n" + "Best Parameters: {'solver': 'newton-cg', 'penalty': 'none', 'max_iter': 100, 'C': 0.005994842503189409}\n", + "Best Score: 0.9225\n", + "Accuracy: 0.9325\n", + "Confusion Matrix:\n", + " [[557 31]\n", + " [ 50 562]]\n", + "Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.92 0.95 0.93 588\n", + " 1 0.95 0.92 0.93 612\n", + "\n", + " accuracy 0.93 1200\n", + " macro avg 0.93 0.93 0.93 1200\n", + "weighted avg 0.93 0.93 0.93 1200\n", + "\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\svm\\_base.py:1244: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n", + "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\utils\\optimize.py:210: ConvergenceWarning: newton-cg failed to converge. 
Increase the number of iterations.\n", " warnings.warn(\n" ] } ], "source": [ - "# Create a Logistic Regression model\n", - "lr = LogisticRegression()\n", "\n", - "# Define the parameter grid to search over\n", - "param_grid = {'C': [0.1, 1, 10, 100], 'solver': ['liblinear', 'lbfgs']}\n", + "param_dist = {\n", + " 'penalty': ['l1', 'l2', 'elasticnet', 'none'],\n", + " 'C': np.logspace(-4, 4, 10),\n", + " 'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],\n", + " 'max_iter': [100, 500, 1000],\n", + "}\n", + "\n", + "# Create the RandomizedSearchCV object with the logistic regression model, hyperparameters, and cross-validation\n", + "log_reg = LogisticRegression()\n", + "random_search = RandomizedSearchCV(log_reg, param_dist, n_iter=100, cv=3, n_jobs=-1, verbose=1, random_state=42)\n", "\n", - "# Create a GridSearchCV object and fit it to the data\n", - "grid_search = GridSearchCV(lr, param_grid, cv=5)\n", - "grid_search.fit(X_scaled, y)\n", + "# Fit the random search to the training data\n", + "random_search.fit(X_train, y_train)\n", "\n", - "# Print the best hyperparameters and the corresponding accuracy score\n", - "print(\"Best hyperparameters: \", grid_search.best_params_)\n", - "print(\"Best accuracy score: \", grid_search.best_score_)" + "# Check the best hyperparameters found\n", + "print(\"Best Parameters:\", random_search.best_params_)\n", + "print(\"Best Score:\", random_search.best_score_)\n", + "\n", + "# Use the best estimator for predictions and evaluation\n", + "best_model = random_search.best_estimator_\n", + "y_pred = best_model.predict(X_test)\n", + "accuracy = accuracy_score(y_test, y_pred)\n", + "conf_matrix = confusion_matrix(y_test, y_pred)\n", + "report = classification_report(y_test, y_pred)\n", + "\n", + "print(\"Accuracy:\", accuracy)\n", + "print(\"Confusion Matrix:\\n\", conf_matrix)\n", + "print(\"Classification Report:\\n\", report)\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predict" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 13, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Accuracy: 0.9158333333333334\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\sklearn\\svm\\_base.py:1244: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n", - " warnings.warn(\n" - ] - } - ], + "outputs": [], + "source": [ + "test_data = pd.read_csv('TestingDataBinary.csv')" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], "source": [ - "lr = LogisticRegression(C=100, solver='liblinear')\n", - "# Train the model on the training data\n", - "lr.fit(X_train, y_train)\n", + "# Preprocessing\n", + "X_new = test_data\n", + "X_new.replace([np.inf, -np.inf], 0, inplace=True)\n", + "\n", + "# Impute the missing values in the features data\n", + "X_imputed_new = imputer.transform(X_new)\n", "\n", - "# Predict the labels for the test data\n", - "y_pred = lr.predict(X_test)\n", + "# Scale the features data\n", + "X_scaled_new = scaler.transform(X_imputed_new)\n", "\n", - "# Evaluate the model performance\n", - "print(\"Accuracy:\", accuracy_score(y_test, y_pred))" + "# Apply PCA transformation\n", + "X_pca_new = pca.transform(X_scaled_new)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ - 
"# Normalize the features\n", - "test_df_scaled = scaler.transform(test_df)\n", - "\n", - "# Select the top 15 features\n", - "test_df_selected = test_df_scaled[:, :top_n]\n", + "# Use the best estimator for predictions on the new data\n", + "y_pred_new = best_model.predict(X_pca_new)\n", "\n", - "# Use the chosen model to predict AQI scores for the test dataset\n", - "test_predictions = rf_reg_selected.predict(test_df_selected)\n", + "# Save the predictions to a new column in the DataFrame\n", + "test_data['predicted_marker'] = y_pred_new\n", "\n", - "# Save the predictions to the subs.csv file\n", - "submission_df = pd.DataFrame({'AQI_Bucket': test_predictions})\n", - "# submission_df.to_csv(\"C:\\Users\\andre\\Downloads\\subs.csv\", index=False)" + "# Save the updated DataFrame to a new CSV file\n", + "test_data.to_csv('TestingDataBinary_with_predictions.csv', index=False)" ] } ], diff --git a/part1.py b/part1.py deleted file mode 100644 index 251f068a0805baebb11d5dc54c9bb4a4450db15a..0000000000000000000000000000000000000000 --- a/part1.py +++ /dev/null @@ -1,25 +0,0 @@ -import pandas as pd -import numpy as np -import sklearn -import scipy -from sklearn.model_selection import train_test_split -from sklearn.metrics import accuracy_score -import matplotlib.pyplot as plt - -#Read CSV file as Pandas Dataframe -train_df = pd.read_csv('TrainingDataBinary.csv') -test_df = pd.read_csv('TestingDataBinary.csv') - -#Confirm reading of files -print(train_df.head) -print("----------------------------------") -print(test_df.head) - -# Get the summary statistics of the data -print(train_df.describe()) - -# Get the information about the columns of the DataFrame -print(train_df.info()) - -# Create a histogram to show the distribution of a column -plt.hist(train_df['marker']) \ No newline at end of file