Python – Generative Adversarial Network (GAN)

Data: 

Salary list from job position (11 rows)

 

Mission:

How to generate new, synthetic data from the existing data

 

Library used:

Matplotlib

Numpy

Pandas

PyTorch

 

Code:

import matplotlib.pyplot as plt

import numpy as np

import pandas as pd

import torch

from torch import nn

 

class Discriminator(nn.Module):
    """Binary classifier scoring 2-D (level, salary) points as real or fake.

    A funnel-shaped MLP (2 -> 256 -> 128 -> 64 -> 1) with ReLU activations
    and dropout after each hidden layer; the final sigmoid squashes the
    score to a probability in [0, 1].
    """

    def __init__(self):
        super().__init__()
        # (in_features, out_features) for each hidden layer of the funnel.
        hidden_shapes = [(2, 256), (256, 128), (128, 64)]
        layers = []
        for in_features, out_features in hidden_shapes:
            layers.append(nn.Linear(in_features, out_features))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(0.3))
        # Single-unit sigmoid head: probability that the input is real.
        layers.append(nn.Linear(64, 1))
        layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Return a (batch, 1) probability that each row of `x` is real."""
        return self.model(x)


discriminator = Discriminator()

 

class Generator(nn.Module):
    """Maps 2-D latent noise to synthetic (level, salary) pairs.

    A small MLP (2 -> 16 -> 32 -> 2) with ReLU between layers and a linear
    (unbounded) output, since salaries are not confined to [0, 1].
    """

    def __init__(self):
        super().__init__()
        stages = (
            nn.Linear(2, 16),
            nn.ReLU(),
            nn.Linear(16, 32),
            nn.ReLU(),
            nn.Linear(32, 2),
        )
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        """Transform latent noise `x` of shape (batch, 2) into fake samples."""
        return self.model(x)


generator = Generator()

 

# Fetch the salary dataset (Level, Gaji) and echo it for inspection.
url = 'https://raw.githubusercontent.com/kokocamp/vlog120/main/vlog120.csv'
vlog138 = pd.read_csv(url)
print(vlog138)

# Features: job level; target: salary ("Gaji").
X = vlog138['Level']
y = vlog138['Gaji']

tensor_X = torch.from_numpy(X.values)
tensor_y = torch.from_numpy(y.values)

# Pack the two columns into one (N, 2) float tensor the GAN trains on.
train_data_length = 10
train_data = torch.zeros((train_data_length, 2))
train_data[:, 0] = tensor_X
train_data[:, 1] = tensor_y
# Labels are ignored by the GAN; the DataLoader just expects (x, y) pairs.
train_labels = torch.zeros(train_data_length)
train_set = list(zip(train_data, train_labels))
plt.plot(train_data[:, 0], train_data[:, 1], ".")

#==============================
# Wrap the (sample, label) pairs in a DataLoader that shuffles the rows
# and serves them in mini-batches to the training loop below. With 10
# samples and batch_size 10, each epoch yields exactly one batch.
batch_size = 10
train_loader = torch.utils.data.DataLoader(
    train_set, batch_size=batch_size, shuffle=True
)
#==============================
# Training hyper-parameters.
lr = 0.001  # learning rate shared by both Adam optimizers
num_epochs = 10  # full passes over the training set
# Binary cross-entropy matches the discriminator's sigmoid output and the
# 0/1 (fake/real) labels used by both training steps.
loss_function = nn.BCELoss()

# Each network gets its own optimizer so their parameter updates stay
# independent of one another.
optimizer_discriminator = torch.optim.Adam(discriminator.parameters(), lr=lr)
optimizer_generator = torch.optim.Adam(generator.parameters(), lr=lr)

 

# Adversarial training: alternate a discriminator step (separate real from
# generated rows) and a generator step (fool the discriminator).
for epoch in range(num_epochs):
    for n, (real_samples, _) in enumerate(train_loader):
        # Use the actual batch size: the final batch can be smaller than
        # `batch_size` when the dataset size is not an exact multiple.
        current_batch_size = real_samples.size(0)

        # Data for training the discriminator: real rows labelled 1,
        # generated rows labelled 0.
        real_samples_labels = torch.ones((current_batch_size, 1))
        latent_space_samples = torch.randn((current_batch_size, 2))
        generated_samples = generator(latent_space_samples)
        generated_samples_labels = torch.zeros((current_batch_size, 1))
        all_samples = torch.cat((real_samples, generated_samples))
        all_samples_labels = torch.cat(
            (real_samples_labels, generated_samples_labels)
        )

        # Training the discriminator.
        discriminator.zero_grad()
        output_discriminator = discriminator(all_samples)
        loss_discriminator = loss_function(output_discriminator, all_samples_labels)
        loss_discriminator.backward()
        optimizer_discriminator.step()

        # Fresh noise for the generator step (don't reuse the batch the
        # discriminator was just trained on).
        latent_space_samples = torch.randn((current_batch_size, 2))

        # Training the generator: it wins when the discriminator assigns
        # its fakes the "real" label, hence real_samples_labels as target.
        generator.zero_grad()
        generated_samples = generator(latent_space_samples)
        output_discriminator_generated = discriminator(generated_samples)
        loss_generator = loss_function(
            output_discriminator_generated, real_samples_labels
        )
        loss_generator.backward()
        optimizer_generator.step()

        # Show loss on the last batch of every 10th epoch.
        # BUG FIX: the original tested `n == batch_size - 1`, but `n` indexes
        # batches, not samples; with one batch per epoch n never exceeded 0,
        # so the losses were never printed. Compare against the batch count.
        if epoch % 10 == 0 and n == len(train_loader) - 1:
            print(f"Epoch: {epoch} Loss D.: {loss_discriminator}")
            print(f"Epoch: {epoch} Loss G.: {loss_generator}")

#==============================

# Draw 10 fresh latent vectors and plot what the trained generator
# produces for them.
latent_space_samples = torch.randn(10, 2)
# Detach from the autograd graph so matplotlib can consume the values.
generated_samples = generator(latent_space_samples).detach()
plt.plot(generated_samples[:, 0], generated_samples[:, 1], ".")

 

I wrapped the scenario in a Youtube video below.


 

Click this link (http://paparadit.blogspot.com/2020/11/the-algorithms-of-machine-learning.html) if you want to check out other algorithms. Thank you for visiting this blog, and please subscribe to my channel.

Labels: ,


PS: If you've benefit from this blog,
you can support it by making a small contribution.

Enter your email address to receive feed update from this blog:

Post a Comment

 

Post a Comment

Leave comments here...