yu4u / age-gender-estimation

Keras implementation of a CNN network for age and gender estimation

Geek Repo:Geek Repo

Github PK Tool:Github PK Tool

Training with pretrained VGG16 goes wrong

DoDDnice6 opened this issue · comments

I read the code in the age_estimation section and tried to revise the age-and-gender estimation model in the same way. With ResNet50 and InceptionResNetV2 there is no problem, but with VGG16 the loss always stays stuck at a certain level, and at prediction time the model always returns the same age and gender for every input. Have you encountered this problem, and do you know where the error is? Thank you for your project.
`

def __getitem__(self, idx):
    """Build one batch of images and (gender, age) targets.

    Parameters
    ----------
    idx : int
        Batch index into the shuffled sample order (``self.indices``).

    Returns
    -------
    tuple
        ``(x, [gender_one_hot, age_one_hot])`` where ``x`` has shape
        ``(batch_size, image_size, image_size, 3)`` and dtype float32.
    """
    batch_size = self.batch_size
    image_size = self.image_size
    # float32, NOT uint8: preprocess_input subtracts the ImageNet channel
    # means, producing negative floats.  Storing those into a uint8 buffer
    # wraps/clips the pixel values, which starves the network of signal --
    # this is the likely reason VGG16 training stalled at a constant loss.
    x = np.zeros((batch_size, image_size, image_size, 3), dtype=np.float32)
    y_g = np.zeros((batch_size, 1), dtype=np.int32)
    y_a = np.zeros((batch_size, 1), dtype=np.int32)

    sample_indices = self.indices[idx * batch_size:(idx + 1) * batch_size]

    for i, sample_id in enumerate(sample_indices):
        image_path, age, gender = self.image_path_and_age_gender[sample_id]
        try:
            image = cv2.imread(str(image_path))  # None for unreadable paths
            resized = cv2.resize(image, (image_size, image_size))
            # Single pass: transform, then preprocess, then store as float.
            x[i] = preprocess_input(
                self.transform_image(resized).astype(np.float32))
        except Exception:
            # Best-effort: log the bad path and leave x[i] as zeros so the
            # batch keeps its shape; the labels below are still written.
            self.log.write(str(image_path) + "\n")
        # Age jitter: perturb the label by roughly N(0.5, 2), floored.
        age += math.floor(np.random.randn() * 2 + 0.5)
        y_a[i] = np.clip(age, 0, 100)
        y_g[i] = np.clip(gender, 0, 1)

    return x, [to_categorical(y_g, 2), to_categorical(y_a, 101)]

def len(self):
    # Total number of loaded samples (records appended by _load_db).
    # NOTE(review): a Keras Sequence expects the dunder __len__ returning
    # the number of *batches* (image_num // batch_size); this plain `len`
    # method is only reachable by an explicit call -- confirm intent.
    return self.image_num

def img_path(self):
    """Return the first loaded record.

    Despite the name, this yields the whole ``[path, age, gender]``
    triple of the first sample, not just the path component.
    """
    first_record = self.image_path_and_age_gender[0]
    return first_record

def on_epoch_end(self):
    """Reshuffle the sample order once per epoch (Keras Sequence hook)."""
    shuffled = np.random.permutation(self.image_num)
    self.indices = shuffled

def _load_db(self, db_dir, type):
    """Append ``[image_path, age, gender]`` records from a ``.mat`` file.

    Parameters
    ----------
    db_dir : str or pathlib.Path
        Directory that contains ``<type>.mat``.
    type : str
        Annotation database name (e.g. "wiki").  Shadows the builtin
        ``type``; kept as-is for interface compatibility.
    """
    # Normalize once: the original stringified root_path on one line but
    # did `root_path + "/"` on another, which raises TypeError for a Path.
    root = str(db_dir)
    d = loadmat(root + "/{}.mat".format(type))
    ages = d["age"][0]
    genders = d["gender"][0]
    fullpaths = d["full_path"]
    # NOTE(review): str(fullpaths[i]) stringifies a MATLAB cell entry;
    # verify it yields a bare relative path for this .mat layout.
    for age, gender, path in tqdm(zip(ages, genders, fullpaths), total=len(ages)):
        self.image_path_and_age_gender.append(
            [(root + "/" + str(path)).strip(), age, gender])

# --- Fragment of a model-building function: the enclosing `def` sits above
# --- this excerpt, so `model_name` and the final `return model` belong to it.
# Pick an ImageNet-pretrained backbone; include_top=False drops the original
# classifier head and pooling="avg" flattens conv features to one vector.
if model_name == "ResNet50":
    base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(
        224, 224, 3), pooling="avg")
elif model_name == "InceptionResNetV2":
    base_model = InceptionResNetV2(
        include_top=False, weights='imagenet', input_shape=(299, 299, 3), pooling="avg")
elif model_name == "VGG16":
    base_model = VGG16(include_top=False, weights='imagenet', input_shape=(
        224, 224, 3), pooling="avg")
# NOTE(review): no else branch -- an unrecognized model_name leaves
# base_model unbound and raises NameError below; confirm callers validate.

# prediction = Dense(units=101, kernel_initializer="he_normal", use_bias=False, activation="softmax",
#                    name="pred_age")(base_model.output)

# model = Model(inputs=base_model.input, outputs=prediction)
# Two softmax heads on the shared pooled features: 2-way gender, 101-way age
# (one class per year, 0..100), matching the to_categorical targets of the
# data generator.
predictions_g = Dense(units=2, kernel_initializer="he_normal", use_bias=False, activation="softmax",
                      name="pred_gender")(base_model.output)
predictions_a = Dense(units=101, kernel_initializer="he_normal", use_bias=False, activation="softmax",
                      name="pred_age")(base_model.output)
model = Model(inputs=base_model.input, outputs=[
              predictions_g, predictions_a])
return model

`