OP: Lisrelchen

Github: Stanford Machine Learning Course


#1 (OP)
Lisrelchen, posted 2016-4-25 07:15:32

Hidden content in this post:

https://github.com/zhouxc/Stanford-Machine-Learning-Course




#2
Lisrelchen, posted 2016-4-25 07:17:27
#!/usr/bin/python
# driver.py -- train one HMM per category, then classify test sequences
# by maximum log-likelihood over the per-category models.

from optparse import OptionParser
import sys

from util import *
from dataset import DataSet
from hmm import *


def split_into_categories(d):
    """Given a dataset d, return a dict mapping categories to lists of
    observation sequences. Only splits the training data."""
    a = {}
    for seqnum in range(len(d.train_output)):
        seq = d.train_output[seqnum]
        category = d.states[d.train_state[seqnum][0]]
        a.setdefault(category, []).append(seq)
    return a


def train_N_state_hmms_from_data(filename, num_states, debug=False):
    """Read all the data, split it up by category, and build a separate
    HMM for each category in the data."""
    dataset = DataSet(filename)
    category_seqs = split_into_categories(dataset)

    # Build an HMM for each category in the data
    hmms = {}
    for cat, seqs in category_seqs.items():
        if debug:
            print("\n\nLearning %s-state HMM for category %s" % (num_states, cat))

        model = HMM(range(num_states), dataset.outputs)
        model.learn_from_observations(seqs, debug)
        hmms[cat] = model
        if debug:
            print("The learned model for %s:" % cat)
            print(model)
    return (hmms, dataset)


@print_timing
def compute_classification_performance(hmms, dataset, debug=False):
    if debug:
        print("Classifying test sequences")
    total = 0
    errors = 0
    for seqnum in range(len(dataset.test_output)):
        total += 1
        seq = dataset.test_output[seqnum]
        actual_category = dataset.states[dataset.test_state[seqnum][0]]
        log_probs = [(cat, hmms[cat].log_prob_of_sequence(seq))
                     for cat in hmms.keys()]
        # Want the biggest log-likelihood first...
        log_probs.sort(key=lambda pair: pair[1], reverse=True)
        if debug:
            ll_str = " ".join(["%s=%.4f" % (c, v) for c, v in log_probs])
            print("Actual: %s; [%s]" % (actual_category, ll_str))

        # Sorted, so the first one is the one we predicted.
        best_cat = log_probs[0][0]
        if actual_category != best_cat:
            errors += 1
    fraction_incorrect = errors / total
    print("Classification mistakes: %d / %d = %.3f" % (errors, total, fraction_incorrect))
    return fraction_incorrect


def main(argv=None):
    if argv is None:
        argv = sys.argv

    usage = "usage: %prog [options] N datafile (pass -h for more info)"
    parser = OptionParser(usage)
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Print extra debugging info")

    (options, args) = parser.parse_args(argv[1:])
    if len(args) != 2:
        print("ERROR: Missing arguments")
        parser.print_usage()
        sys.exit(1)

    num_states = int(args[0])
    filename = normalize_filename(args[1])

    # Read all the data, split it up by category, and build one model
    # per category; then see how well we classify the test sequences.
    hmms, dataset = train_N_state_hmms_from_data(filename, num_states, options.verbose)
    fraction_incorrect = compute_classification_performance(hmms, dataset, options.verbose)

    return 0


if __name__ == "__main__":
    sys.exit(main())
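The script is invoked as "driver.py [options] N datafile", with N the number of hidden states per model: it trains one HMM per category on the training sequences, then labels each test sequence with the category whose model assigns it the highest log-likelihood. The hmm module itself is not reproduced in this thread, so as a rough illustration of what log_prob_of_sequence computes, here is a minimal log-space forward-algorithm sketch; the parameter layout (pi, A, B) is my own assumption, not the repo's actual API:

import numpy as np

def log_prob_of_sequence(pi, A, B, obs):
    """Log-space forward algorithm: returns log P(obs | model).
    pi:  (S,) initial state distribution
    A:   (S, S) transitions, A[i, j] = P(next state j | state i)
    B:   (S, V) emissions, B[i, k] = P(output symbol k | state i)
    obs: sequence of integer output symbols"""
    log_alpha = np.log(pi) + np.log(B[:, obs[0]])
    for o in obs[1:]:
        # Sum over previous states in log space, then add the emission term.
        log_alpha = np.logaddexp.reduce(log_alpha[:, None] + np.log(A),
                                        axis=0) + np.log(B[:, o])
    return np.logaddexp.reduce(log_alpha)

Classification then reduces to an argmax of this quantity over the per-category models, which is exactly what the sort in compute_classification_performance implements.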

#3
Lisrelchen, posted 2016-4-25 07:22:02
function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
%GRADIENTDESCENT Performs gradient descent to learn theta
%   theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by
%   taking num_iters gradient steps with learning rate alpha

% Initialize some useful values
m = length(y); % number of training examples
J_history = zeros(num_iters, 1);

for iter = 1:num_iters

    % ====================== YOUR CODE HERE ======================
    % Instructions: Perform a single gradient step on the parameter vector
    %               theta.
    %
    % Hint: While debugging, it can be useful to print out the values
    %       of the cost function (computeCost) and gradient here.
    %

    H = X * theta;              % predictions for all m training examples
    T = zeros(size(theta));     % gradient accumulator, one entry per parameter
    for i = 1:m
        T = T + (H(i) - y(i)) * X(i, :)';
    end

    theta = theta - (alpha / m) * T;   % simultaneous update of all parameters

    % ============================================================

    % Save the cost J in every iteration
    J_history(iter) = computeCost(X, y, theta);

end

end
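The inner loop above accumulates the gradient one training example at a time, which is fine for the exercise but easy to vectorize. Here is a minimal NumPy sketch of the equivalent batch update (the function and variable names are mine, not part of the course code):

import numpy as np

def gradient_descent(X, y, theta, alpha, num_iters):
    """Batch gradient descent for linear regression.
    X: (m, n) design matrix, first column all ones for the intercept
    y: (m,) targets; theta: (n,) initial parameters
    Returns the learned theta and the cost after every iteration."""
    m = len(y)
    J_history = np.zeros(num_iters)
    for it in range(num_iters):
        grad = X.T @ (X @ theta - y) / m   # sums (h(x_i) - y_i) * x_i over all i
        theta = theta - alpha * grad       # simultaneous parameter update
        residuals = X @ theta - y
        J_history[it] = residuals @ residuals / (2 * m)  # J(theta) after the step
    return theta, J_history

The single expression X.T @ (X @ theta - y) computes the same sum as the MATLAB for-loop over i = 1:m, so the two versions take identical steps.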

#4
Lisrelchen, posted 2016-4-25 07:24:09
%% Machine Learning Online Class - Exercise 3 | Part 1: One-vs-all

%  Instructions
%  ------------
%
%  This file contains code that helps you get started on the
%  linear exercise. You will need to complete the following functions
%  in this exercise:
%
%     lrCostFunction.m (logistic regression cost function)
%     oneVsAll.m
%     predictOneVsAll.m
%     predict.m
%
%  For this exercise, you will not need to change any code in this file,
%  or any other files other than those mentioned above.
%

%% Initialization
clear ; close all; clc

%% Setup the parameters you will use for this part of the exercise
input_layer_size  = 400;  % 20x20 Input Images of Digits
num_labels = 10;          % 10 labels, from 1 to 10
                          % (note that we have mapped "0" to label 10)

%% =========== Part 1: Loading and Visualizing Data =============
%  We start the exercise by first loading and visualizing the dataset.
%  You will be working with a dataset that contains handwritten digits.
%

% Load Training Data
fprintf('Loading and Visualizing Data ...\n')

load('ex3data1.mat'); % training data stored in arrays X, y
m = size(X, 1);

% Randomly select 100 data points to display
rand_indices = randperm(m);
sel = X(rand_indices(1:100), :);

displayData(sel);

fprintf('Program paused. Press enter to continue.\n');
pause;

%% ============ Part 2: Vectorize Logistic Regression ============
%  In this part of the exercise, you will reuse your logistic regression
%  code from the last exercise. Your task here is to make sure that your
%  regularized logistic regression implementation is vectorized. After
%  that, you will implement one-vs-all classification for the handwritten
%  digit dataset.
%

fprintf('\nTraining One-vs-All Logistic Regression...\n')

lambda = 0.1;
[all_theta] = oneVsAll(X, y, num_labels, lambda);

fprintf('Program paused. Press enter to continue.\n');
pause;


%% ================ Part 3: Predict for One-Vs-All ================
%  After ...
pred = predictOneVsAll(all_theta, X);

fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
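In this script, oneVsAll fits one regularized logistic-regression classifier per digit label and predictOneVsAll assigns each image to the label whose classifier scores highest; both functions are left for the student and are not posted in this thread. As a rough illustration of the idea, here is a minimal NumPy sketch that uses plain gradient steps instead of the course's fmincg optimizer (all names and the alpha/iteration defaults are my own assumptions):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def one_vs_all(X, y, num_labels, lam, alpha=0.1, iters=500):
    """Train one regularized logistic-regression classifier per label.
    X: (m, n) features (bias column added here); y: (m,) labels in 1..num_labels.
    Returns all_theta of shape (num_labels, n + 1)."""
    m, n = X.shape
    Xb = np.hstack([np.ones((m, 1)), X])
    all_theta = np.zeros((num_labels, n + 1))
    for c in range(1, num_labels + 1):
        t = np.zeros(n + 1)
        yc = (y == c).astype(float)              # one-vs-rest binary targets
        for _ in range(iters):
            grad = Xb.T @ (sigmoid(Xb @ t) - yc) / m
            grad[1:] += (lam / m) * t[1:]        # do not regularize the bias term
            t -= alpha * grad
        all_theta[c - 1] = t
    return all_theta

def predict_one_vs_all(all_theta, X):
    """Pick, for each row of X, the label whose classifier scores highest."""
    Xb = np.hstack([np.ones((X.shape[0], 1)), X])
    return np.argmax(Xb @ all_theta.T, axis=1) + 1   # labels are 1-based

With labels in 1..10 ("0" mapped to label 10, as the script notes), training-set accuracy is then mean(pred == y), matching the final fprintf above.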

#5
liydxjtu, posted 2016-4-25 07:34:18
Hello there!

#6
michaelshyong, posted 2016-4-25 07:38:50
Replying to see the hidden content.

#7
smartlife, posted 2016-4-25 07:50:24

#8
cxzbb, posted 2016-4-25 07:55:30
Thanks for sharing!

#9
yangbing1008, posted 2016-4-25 08:14:27
Thanks for sharing.

#10
yangbing1008, posted 2016-4-25 08:15:35
Thanks for sharing.
