# Two callbacks are used here: early stopping and model checkpointing
from tensorflow import keras
from tensorflow.keras import layers
callbacks_list = [
    keras.callbacks.EarlyStopping(
        monitor="val_accuracy",  # metric to monitor
        patience=2,              # interrupt training if it does not improve for 2 epochs
    ),
    keras.callbacks.ModelCheckpoint(
        filepath="checkpoint_path.keras",  # where to save the model (matches the load below)
        monitor="val_loss",
        save_best_only=True,               # keep only the model with the best val_loss
    ),
]
# Get the model
model = get_mnist_model()
model.compile(optimizer="rmsprop",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_images, train_labels,
          epochs=10,
          callbacks=callbacks_list,  # pass the callbacks to fit()
          validation_data=(val_images, val_labels))
test_metrics = model.evaluate(test_images, test_labels)  # loss and metrics on new data
predictions = model.predict(test_images)  # class probabilities on new data
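# The softmax output is one probability per class. A minimal sketch (assuming
# predictions and test_labels from above are in scope) of turning those
# probabilities into hard class predictions and recomputing accuracy by hand:
import numpy as np

predicted_classes = predictions.argmax(axis=1)  # most likely class for each sample
manual_accuracy = (predicted_classes == test_labels).mean()
print(f"Accuracy recomputed from predict(): {manual_accuracy:.4f}")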

# You can also save the model manually after training, simply by calling model.save('my_checkpoint_path').
# Reload the model
model_new=keras.models.load_model("checkpoint_path.keras")
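# Quick sanity check, assuming the test set prepared below is still in memory:
# the reloaded checkpoint should report roughly the same test metrics as the
# best model saved by ModelCheckpoint (load_model restores the compile state).
reloaded_metrics = model_new.evaluate(test_images, test_labels)
print("Reloaded model [loss, accuracy]:", reloaded_metrics)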
on_epoch_begin(epoch, logs) ←----Called at the start of every epoch
on_epoch_end(epoch, logs) ←----Called at the end of every epoch
on_batch_begin(batch, logs) ←----Called right before processing each batch
on_batch_end(batch, logs) ←----Called right after processing each batch
on_train_begin(logs) ←----Called at the start of training
on_train_end(logs) ←----Called at the end of training
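# These are the methods a keras.callbacks.Callback subclass can override. A minimal
# sketch that hooks a few of them just to trace when they fire (the class name
# VerboseCallback and its print messages are illustrative, not from the original):
class VerboseCallback(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        print("Training is starting.")

    def on_epoch_begin(self, epoch, logs=None):
        print(f"Epoch {epoch} is starting.")

    def on_epoch_end(self, epoch, logs=None):
        # logs holds the metrics computed for this epoch, e.g. logs["val_loss"]
        print(f"Epoch {epoch} ended with logs: {logs}")

    def on_train_end(self, logs=None):
        print("Training is finished.")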
from matplotlib import pyplot as plt
# Record the loss of every batch during training and save one plot per epoch
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs):
        self.per_batch_losses = []

    def on_batch_end(self, batch, logs):
        self.per_batch_losses.append(logs.get("loss"))

    def on_epoch_end(self, epoch, logs):
        plt.clf()
        plt.plot(range(len(self.per_batch_losses)), self.per_batch_losses,
                 label="Training loss for each batch")
        plt.xlabel(f"Batch (epoch {epoch})")
        plt.ylabel("Loss")
        plt.legend()
        plt.savefig(f"plot_at_epoch_{epoch}")
        self.per_batch_losses = []  # reset the list for the next epoch
model = get_mnist_model()
model.compile(optimizer="rmsprop",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_images, train_labels,
          epochs=10,
          callbacks=[LossHistory()],
          validation_data=(val_images, val_labels))
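# With the savefig call above, each epoch leaves a file named plot_at_epoch_0.png,
# plot_at_epoch_1.png, and so on in the working directory (matplotlib saves PNG by
# default), each showing the per-batch training loss curve for that epoch.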

def get_mnist_model():
    inputs = keras.Input(shape=(28 * 28,))
    features = layers.Dense(512, activation="relu")(inputs)
    features = layers.Dropout(0.5)(features)
    outputs = layers.Dense(10, activation="softmax")(features)
    model = keras.Model(inputs, outputs)
    return model

# Dataset
from tensorflow.keras.datasets import mnist
(train_images,train_labels),(test_images,test_labels)=mnist.load_data()
train_images=train_images.reshape((60000,28*28)).astype("float32")/255
test_images=test_images.reshape((10000,28*28)).astype("float32")/255
train_images,val_images=train_images[10000:],train_images[:10000]
train_labels,val_labels=train_labels[10000:],train_labels[:10000]
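# A small sanity check (the assert statements are illustrative) confirming the
# shapes and scaling produced by the preprocessing above:
assert train_images.shape == (50000, 28 * 28)   # 50,000 training samples after the split
assert val_images.shape == (10000, 28 * 28)     # 10,000 validation samples
assert test_images.shape == (10000, 28 * 28)    # 10,000 test samples
assert train_images.min() >= 0.0 and train_images.max() <= 1.0  # pixels rescaled to [0, 1]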