楼主: Lisrelchen
1092 4

[Microsoft Cognitive Toolkit]MultiLayer Perceptron using CNTK [推广有奖]

  • 0关注
  • 62粉丝

VIP

已卖:4194份资源

院士

67%

还不是VIP/贵宾

-

TA的文库  其他...

Bayesian NewOccidental

Spatial Data Analysis

东西方数据挖掘

威望
0
论坛币
50288 个
通用积分
83.6306
学术水平
253 点
热心指数
300 点
信用等级
208 点
经验
41518 点
帖子
3256
精华
14
在线时间
766 小时
注册时间
2006-5-4
最后登录
2022-11-6

楼主
Lisrelchen 发表于 2017-9-17 02:44:40 |AI写论文

+2 论坛币
k人 参与回答

经管之家送您一份

应届毕业生专属福利!

求职就业群
赵安豆老师微信:zhaoandou666

经管之家联合CDA

送您一个全额奖学金名额~ !

感谢您参与论坛问题回答

经管之家送您两个论坛币!

+2 论坛币
  1. In this tutorial, we train a multi-layer perceptron on MNIST data. This notebook provides the recipe using Python APIs. If you are looking for this example in BrainScript, please look here
  2. Introduction
  3. Problem As in CNTK 103B, we will continue to work on the same problem of recognizing digits in MNIST data. The MNIST data comprises hand-written digits with little background noise.
复制代码

本帖隐藏的内容

MultiLayer Perceptron using CNTK.pdf (100.5 KB)


二维码

扫码加我 拉你入群

请注明:姓名-公司-职位

以便审核进群资格,未注明则拒绝

关键词:Microsoft Cognitive Toolkit Microsoft

沙发
Lisrelchen 发表于 2017-9-17 02:45:41
  1. # Copyright (c) Microsoft. All rights reserved.

  2. # Licensed under the MIT license. See LICENSE.md file in the project root
  3. # for full license information.
  4. # ==============================================================================

  5. import argparse
  6. import numpy as np
  7. import sys
  8. import os
  9. import cntk as C
  10. from cntk.train import Trainer, minibatch_size_schedule
  11. from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT
  12. from cntk.device import cpu, try_set_default_device
  13. from cntk.learners import adadelta, learning_rate_schedule, UnitType
  14. from cntk.ops import relu, element_times, constant
  15. from cntk.layers import Dense, Sequential, For
  16. from cntk.losses import cross_entropy_with_softmax
  17. from cntk.metrics import classification_error
  18. from cntk.train.training_session import *
  19. from cntk.logging import ProgressPrinter, TensorBoardProgressWriter

  20. abs_path = os.path.dirname(os.path.abspath(__file__))

  21. def check_path(path):
  22.     if not os.path.exists(path):
  23.         readme_file = os.path.normpath(os.path.join(
  24.             os.path.dirname(path), "..", "README.md"))
  25.         raise RuntimeError(
  26.             "File '%s' does not exist. Please follow the instructions at %s to download and prepare it." % (path, readme_file))

  27. def create_reader(path, is_training, input_dim, label_dim):
  28.     return MinibatchSource(CTFDeserializer(path, StreamDefs(
  29.         features  = StreamDef(field='features', shape=input_dim, is_sparse=False),
  30.         labels    = StreamDef(field='labels',   shape=label_dim, is_sparse=False)
  31.     )), randomize=is_training, max_sweeps = INFINITELY_REPEAT if is_training else 1)


  32. # Creates and trains a feedforward classification model for MNIST images

  33. def simple_mnist(tensorboard_logdir=None):
  34.     input_dim = 784
  35.     num_output_classes = 10
  36.     num_hidden_layers = 1
  37.     hidden_layers_dim = 200

  38.     # Input variables denoting the features and label data
  39.     feature = C.input_variable(input_dim, np.float32)
  40.     label = C.input_variable(num_output_classes, np.float32)

  41.     # Instantiate the feedforward classification model
  42.     scaled_input = element_times(constant(0.00390625), feature)

  43.     z = Sequential([For(range(num_hidden_layers), lambda i: Dense(hidden_layers_dim, activation=relu)),
  44.                     Dense(num_output_classes)])(scaled_input)

  45.     ce = cross_entropy_with_softmax(z, label)
  46.     pe = classification_error(z, label)

  47.     data_dir = os.path.join(abs_path, "..", "..", "..", "DataSets", "MNIST")

  48.     path = os.path.normpath(os.path.join(data_dir, "Train-28x28_cntk_text.txt"))
  49.     check_path(path)

  50.     reader_train = create_reader(path, True, input_dim, num_output_classes)

  51.     input_map = {
  52.         feature  : reader_train.streams.features,
  53.         label  : reader_train.streams.labels
  54.     }

  55.     # Training config
  56.     minibatch_size = 64
  57.     num_samples_per_sweep = 60000
  58.     num_sweeps_to_train_with = 10

  59.     # Instantiate progress writers.
  60.     #training_progress_output_freq = 100
  61.     progress_writers = [ProgressPrinter(
  62.         #freq=training_progress_output_freq,
  63.         tag='Training',
  64.         num_epochs=num_sweeps_to_train_with)]

  65.     if tensorboard_logdir is not None:
  66.         progress_writers.append(TensorBoardProgressWriter(freq=10, log_dir=tensorboard_logdir, model=z))

  67.     # Instantiate the trainer object to drive the model training
  68.     lr = learning_rate_schedule(1, UnitType.sample)
  69.     trainer = Trainer(z, (ce, pe), adadelta(z.parameters, lr), progress_writers)

  70.     training_session(
  71.         trainer=trainer,
  72.         mb_source = reader_train,
  73.         mb_size = minibatch_size,
  74.         model_inputs_to_streams = input_map,
  75.         max_samples = num_samples_per_sweep * num_sweeps_to_train_with,
  76.         progress_frequency=num_samples_per_sweep
  77.     ).train()
  78.    
  79.     # Load test data
  80.     path = os.path.normpath(os.path.join(data_dir, "Test-28x28_cntk_text.txt"))
  81.     check_path(path)

  82.     reader_test = create_reader(path, False, input_dim, num_output_classes)

  83.     input_map = {
  84.         feature  : reader_test.streams.features,
  85.         label  : reader_test.streams.labels
  86.     }

  87.     # Test data for trained model
  88.     test_minibatch_size = 1024
  89.     num_samples = 10000
  90.     num_minibatches_to_test = num_samples / test_minibatch_size
  91.     test_result = 0.0
  92.     for i in range(0, int(num_minibatches_to_test)):
  93.         mb = reader_test.next_minibatch(test_minibatch_size, input_map=input_map)
  94.         eval_error = trainer.test_minibatch(mb)
  95.         test_result = test_result + eval_error

  96.     # Average of evaluation errors of all test minibatches
  97.     return test_result / num_minibatches_to_test


  98. if __name__=='__main__':
  99.     # Specify the target device to be used for computing, if you do not want to
  100.     # use the best available one, e.g.
  101.     # try_set_default_device(cpu())

  102.     parser = argparse.ArgumentParser()
  103.     parser.add_argument('-tensorboard_logdir', '--tensorboard_logdir',
  104.                         help='Directory where TensorBoard logs should be created', required=False, default=None)
  105.     args = vars(parser.parse_args())

  106.     error = simple_mnist(args['tensorboard_logdir'])
  107.     print("Error: %f" % error)
复制代码

藤椅
auirzxp 学生认证  发表于 2017-9-17 03:03:36
提示: 作者被禁止或删除 内容自动屏蔽

板凳
MouJack007 发表于 2017-9-17 03:13:10
谢谢楼主分享!

报纸
MouJack007 发表于 2017-9-17 03:13:28

您需要登录后才可以回帖 登录 | 我要注册

本版微信群
加好友,备注jltj
拉您入交流群
GMT+8, 2025-12-31 19:31