
# tensorflow
import tensorflow.keras as keras
import tensorflow as tf

# image processing
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.layers import Input, Dense, Activation, GlobalAveragePooling2D, Dropout


# model / neural network
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential, Model

from tensorflow.keras.applications.efficientnet_v2 import EfficientNetV2M
from tensorflow.keras.applications import InceptionResNetV2, EfficientNetB0, EfficientNetV2L, ResNet152V2
import tensorboard
import os, math, datetime, random
import numpy as np

from PIL import Image, ImageEnhance

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

learning_rate = 0.001
dropout_rate  = 0.4
N_EPOCHS      = 50  
N_BATCH       = 32

img_height, img_width = 160, 160

data_dir = '/train/'


# Training-set augmentation. featurewise_center / featurewise_std_normalization need
# dataset statistics from an ImageDataGenerator.fit() call; see the snippet after the
# flow_from_directory() calls below.
image_gen_train = ImageDataGenerator(
                                        featurewise_center=True,
                                        featurewise_std_normalization=True,
                                        rotation_range=20,
                                        width_shift_range=0.1,
                                        height_shift_range=0.1,
                                        horizontal_flip=True,
                                        rescale=1./255,
                                        brightness_range=[0.2, 1.0],
                                        validation_split=0.2    # required: flow_from_directory() below uses subset="training"
                                    )
                                    

image_gen_val = ImageDataGenerator(
                                     validation_split=0.2,
                                     rescale=1./255
                                  )

train_ds = image_gen_train.flow_from_directory(
                                            directory = data_dir,
                                            subset="training",
                                            shuffle=True,
                                            target_size=(img_height, img_width),
                                            batch_size=N_BATCH,
                                            class_mode='binary',
                                            color_mode = 'rgb'
                                            )

val_ds = image_gen_val.flow_from_directory(
                                            directory = data_dir,
                                            subset="validation",
                                            shuffle=True,
                                            target_size=(img_height, img_width),
                                            batch_size=N_BATCH,
                                            class_mode='binary',
                                            color_mode = 'rgb'
                                            )
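
# featurewise_center / featurewise_std_normalization need dataset-level statistics,
# which ImageDataGenerator only learns from an explicit fit() call; without it Keras
# warns and skips that normalization. A minimal sketch, assuming a single augmented
# batch is representative enough (a larger sample gives better estimates):
sample_images, _ = next(train_ds)
image_gen_train.fit(sample_images)
# Note that image_gen_val above does not apply the same featurewise normalization,
# so train and validation preprocessing will differ slightly.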


class_names = list(train_ds.class_indices.keys())
print(class_names)
print(len(class_names))

def Custom_model():
 
    input_tensor = Input(shape=(img_height, img_width, 3))
    # Note: ResNet V2's ImageNet weights expect inputs preprocessed to [-1, 1]; the
    # generators above feed [0, 1] images plus featurewise standardization instead.
    base_model = ResNet152V2(input_tensor=input_tensor, include_top=False, weights='imagenet')
    base_model.trainable = False  # freeze the pretrained base (transfer learning, no fine-tuning)
    
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(dropout_rate)(x)

    # Capped ReLU ("relu6") applied as an explicit layer: it serializes cleanly and does
    # not rely on 'relu6' being a registered activation string in this Keras version.
    x = Dense(512)(x)
    x = layers.ReLU(max_value=6.0)(x)
    x = Dropout(dropout_rate)(x)

    x = Dense(256)(x)
    x = layers.ReLU(max_value=6.0)(x)
    x = Dropout(dropout_rate)(x)

    output = Dense(1, activation='sigmoid', kernel_regularizer='l2')(x)
    
    model = Model(inputs=input_tensor, outputs=output)
    
    return model

model = Custom_model()
# 'accuracy' (not 'acc') so the 'val_accuracy' monitored by ModelCheckpoint below exists;
# tf.keras.optimizers.AdamW is built in from TF 2.11 (earlier versions need tensorflow_addons).
model.compile(optimizer=tf.keras.optimizers.AdamW(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['accuracy'])

total_sample=train_ds.n

#Callbacks
#tensorboard
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
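# (Training curves can then be inspected while the job runs by pointing TensorBoard
#  at the same directory from a shell, e.g. `tensorboard --logdir logs/fit`.)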

# Learning rate scheduler: keep the initial LR for the first 5 epochs, then decay it
# by a factor of exp(-0.1) (about 0.905) every subsequent epoch.
def scheduler(epoch, lr):
    if epoch < 5:
        return lr
    else:
        return lr * tf.math.exp(-0.1)
lr_callback = tf.keras.callbacks.LearningRateScheduler(scheduler)

checkpoint_filepath = "/tmp/training_checkpoints"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                                                                filepath=checkpoint_filepath,
                                                                save_weights_only=True,
                                                                monitor='val_accuracy',
                                                                mode='max',
                                                                save_best_only=True)


history = model.fit(
    train_ds,
    steps_per_epoch = train_ds.samples  // N_BATCH,
    epochs=N_EPOCHS,
    validation_data=val_ds,
    validation_steps=val_ds.samples // N_BATCH,
    callbacks=[tensorboard_callback, lr_callback, model_checkpoint_callback],
    use_multiprocessing=True, workers=8  # generator worker settings (TF 2.x Keras fit(); removed in Keras 3)
)
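
# The checkpoint callback saved only the best weights (by val_accuracy), while `model`
# currently holds the weights from the last epoch. Restoring the best checkpoint before
# exporting is a reasonable final step (assuming at least one checkpoint was written):
model.load_weights(checkpoint_filepath)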


# Save the entire model as a SavedModel.
model.save('saved_model/my_model')
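
# Quick sanity check of the exported model: reload it and classify a single image.
# This is only a sketch: 'some_image.jpg' is a placeholder path, the 0.5 threshold
# matches the sigmoid output above, and only the resize + 1/255 rescale is mirrored
# here (the featurewise statistics learned by image_gen_train are not re-applied,
# which is a simplification).
loaded_model = tf.keras.models.load_model('saved_model/my_model')

img = Image.open('some_image.jpg').convert('RGB').resize((img_width, img_height))
x = np.asarray(img, dtype='float32') / 255.0
x = np.expand_dims(x, axis=0)  # shape (1, 160, 160, 3)

prob = float(loaded_model.predict(x)[0][0])
predicted_class = list(class_names)[int(prob >= 0.5)]
print(prob, predicted_class)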
