Commit ec1554ca authored by anon's avatar anon
Browse files

Now with version control

parents
v1.001
# analysis.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
######################
# ANALYSIS QUESTIONS #
######################
# Set the given parameters to obtain the specified policies through
# value iteration.
def question2():
    """Discount and noise parameters that solve question 2 via value iteration."""
    discount, noise = 0.9, 0.2
    return discount, noise
def question3a():
    """Placeholder parameters for question 3a (discount, noise, living reward)."""
    # Fill these in; if no setting produces the target policy, return 'NOT POSSIBLE'.
    return None, None, None
# If not possible, return 'NOT POSSIBLE'
def question3b():
    """Placeholder parameters for question 3b (discount, noise, living reward)."""
    # Fill these in; if no setting produces the target policy, return 'NOT POSSIBLE'.
    return None, None, None
# If not possible, return 'NOT POSSIBLE'
def question3c():
    """Placeholder parameters for question 3c (discount, noise, living reward)."""
    # Fill these in; if no setting produces the target policy, return 'NOT POSSIBLE'.
    return None, None, None
# If not possible, return 'NOT POSSIBLE'
def question3d():
    """Placeholder parameters for question 3d (discount, noise, living reward)."""
    # Fill these in; if no setting produces the target policy, return 'NOT POSSIBLE'.
    return None, None, None
# If not possible, return 'NOT POSSIBLE'
def question3e():
    """Placeholder parameters for question 3e (discount, noise, living reward)."""
    # Fill these in; if no setting produces the target policy, return 'NOT POSSIBLE'.
    return None, None, None
# If not possible, return 'NOT POSSIBLE'
def question8():
    """Placeholder parameters for question 8 (epsilon, learning rate)."""
    # Fill these in; if no setting works, return 'NOT POSSIBLE'.
    return None, None
# If not possible, return 'NOT POSSIBLE'
if __name__ == '__main__':
    # When run as a script, print every question*() answer in this module.
    print('Answers to analysis questions:')
    import analysis
    questionNames = [name for name in dir(analysis) if name.startswith('question')]
    for questionName in questionNames:
        answer = getattr(analysis, questionName)()
        print(' Question %s:\t%s' % (questionName, str(answer)))
# autograder.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# imports from python standard library
import grading
import imp
import optparse
import os
import re
import sys
import projectParams
import random
# Seed the global RNG so autograder runs are deterministic and reproducible.
random.seed(0)
# GameState is only needed for the pacman-based projects; tolerate its absence.
# Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit during import
# are no longer silently swallowed.
try:
    from pacman import GameState
except Exception:
    pass
# register arguments and set default values
def readCommand(argv):
    """Register the autograder's command-line options and parse argv.

    Returns the optparse options object; positional arguments are discarded.
    """
    parser = optparse.OptionParser(
        description='Run public tests on student code')
    parser.set_defaults(generateSolutions=False, edxOutput=False, gsOutput=False,
                        muteOutput=False, printTestCase=False, noGraphics=False)

    # One (flag strings, add_option keyword args) row per option, registered
    # in a loop so the table reads at a glance.
    optionTable = [
        (('--test-directory',),
         dict(dest='testRoot', default='test_cases',
              help='Root test directory which contains subdirectories corresponding to each question')),
        (('--student-code',),
         dict(dest='studentCode', default=projectParams.STUDENT_CODE_DEFAULT,
              help='comma separated list of student code files')),
        (('--code-directory',),
         dict(dest='codeRoot', default="",
              help='Root directory containing the student and testClass code')),
        (('--test-case-code',),
         dict(dest='testCaseCode', default=projectParams.PROJECT_TEST_CLASSES,
              help='class containing testClass classes for this project')),
        (('--generate-solutions',),
         dict(dest='generateSolutions', action='store_true',
              help='Write solutions generated to .solution file')),
        (('--edx-output',),
         dict(dest='edxOutput', action='store_true',
              help='Generate edX output files')),
        (('--gradescope-output',),
         dict(dest='gsOutput', action='store_true',
              help='Generate GradeScope output files')),
        (('--mute',),
         dict(dest='muteOutput', action='store_true',
              help='Mute output from executing tests')),
        (('--print-tests', '-p'),
         dict(dest='printTestCase', action='store_true',
              help='Print each test case before running them.')),
        (('--test', '-t'),
         dict(dest='runTest', default=None,
              help='Run one particular test. Relative to test root.')),
        (('--question', '-q'),
         dict(dest='gradeQuestion', default=None,
              help='Grade one particular question.')),
        (('--no-graphics',),
         dict(dest='noGraphics', action='store_true',
              help='No graphics display for pacman games.')),
    ]
    for flags, kwargs in optionTable:
        parser.add_option(*flags, **kwargs)

    options, _ = parser.parse_args(argv)
    return options
# confirm we should author solution files
def confirmGenerate():
    """Interactively confirm overwriting solution files.

    Loops on stdin until the user types 'yes' (returns) or 'no' (exits the
    process with status 0).
    """
    print('WARNING: this action will overwrite any solution files.')
    print('Are you sure you want to proceed? (yes/no)')
    while True:
        reply = sys.stdin.readline().strip()
        if reply == 'no':
            sys.exit(0)
        if reply == 'yes':
            return
        print('please answer either "yes" or "no"')
# TODO: Fix this so that it tracebacks work correctly
# Looking at source of the traceback module, presuming it works
# the same as the intepreters, it uses co_filename. This is,
# however, a readonly attribute.
def setModuleName(module, filename):
    """Tag every function and class in module with a __file__ attribute.

    Partial workaround for tracebacks showing the wrong filename for modules
    built from strings; see the TODO above — the interpreter actually uses
    the read-only co_filename, so this only helps consumers of __file__.
    """
    functionType = type(confirmGenerate)
    classType = type(optparse.Option)

    for attributeName in dir(module):
        attribute = getattr(module, attributeName)
        # Already tagged (or carries its own __file__) — leave it alone.
        if hasattr(attribute, '__file__'):
            continue
        if type(attribute) in (functionType, classType):
            setattr(attribute, '__file__', filename)
        # TODO: assign member __file__'s?
#from cStringIO import StringIO
def loadModuleString(moduleSource, moduleName='loadedModule'):
    """Create and execute a module object from source code held in a string.

    Args:
        moduleSource: the module's Python source as a string.
        moduleName: name to give the new module (added with a default so the
            original one-argument call signature still works).

    Returns:
        The newly created module object with moduleSource executed in it.

    Bug fix: the original body referenced the undefined globals `k` and
    `moduleCodeDict` (leftovers of the commented-out loadModuleDict path),
    so every call raised NameError. It now uses its own parameters.
    """
    # Below broken, imp doesn't believe its being passed a file:
    # ValueError: load_module arg#2 should be a file or None
    #
    # f = StringIO(moduleCodeDict[k])
    # tmp = imp.load_module(k, f, k, (".py", "r", imp.PY_SOURCE))
    module = imp.new_module(moduleName)
    exec(moduleSource, module.__dict__)
    setModuleName(module, moduleName)
    return module
import py_compile
def loadModuleFile(moduleName, filePath):
with open(filePath, 'r') as f:
return imp.load_module(moduleName, f, "%s.py" % moduleName, (".py", "r", imp.PY_SOURCE))
def readFile(path, root=""):
    """Return the contents of the file at root/path as a single string."""
    fullPath = os.path.join(root, path)
    with open(fullPath, 'r') as fileHandle:
        return fileHandle.read()
#######################################################################
# Error Hint Map
#######################################################################
# TODO: use these
ERROR_HINT_MAP = {
'q1': {
"<type 'exceptions.IndexError'>": """
We noticed that your project threw an IndexError on q1.
While many things may cause this, it may have been from
assuming a certain number of successors from a state space
or assuming a certain number of actions available from a given
state. Try making your code more general (no hardcoded indices)
and submit again!
"""
},
'q3': {
"<type 'exceptions.AttributeError'>": """
We noticed that your project threw an AttributeError on q3.
While many things may cause this, it may have been from assuming
a certain size or structure to the state space. For example, if you have
a line of code assuming that the state is (x, y) and we run your code
on a state space with (x, y, z), this error could be thrown. Try
making your code more general and submit again!
"""
}
}
import pprint
def splitStrings(d):
    """Return a copy of d without dunder keys; multi-line string values
    are replaced by lists of their lines."""
    cleaned = dict(d)
    for key in d:
        if key.startswith("__"):
            del cleaned[key]
        elif "\n" in cleaned[key]:
            cleaned[key] = cleaned[key].split("\n")
    return cleaned
def printTest(testDict, solutionDict):
    """Print the raw lines of a test case and its solution to stdout.

    Bug fix: the Python 2->3 conversion left `print((" |", line))`, which
    printed a tuple repr like "(' |', 'text')" instead of " | text"; this
    restores the intended " | <line>" output. The unused PrettyPrinter
    local was also removed.
    """
    print("Test case:")
    for line in testDict["__raw_lines__"]:
        print(" |", line)
    print("Solution:")
    for line in solutionDict["__raw_lines__"]:
        print(" |", line)
def runTest(testName, moduleDict, printTestCase=False, display=None):
    """Load and execute the single test case named testName.

    Expects testName + '.test' and testName + '.solution' files on disk;
    results are funnelled through a throwaway Grades object.
    """
    import testParser
    import testClasses

    # The test classes look up student modules as globals of this module.
    currentModule = sys.modules[__name__]
    for studentModuleName in moduleDict:
        setattr(currentModule, studentModuleName, moduleDict[studentModuleName])

    testDict = testParser.TestParser(testName + ".test").parse()
    solutionDict = testParser.TestParser(testName + ".solution").parse()
    testDict['test_out_file'] = os.path.join('%s.test_output' % testName)

    testCaseClass = getattr(projectTestClasses, testDict['class'])
    stubQuestion = getattr(testClasses, 'Question')({'max_points': 0}, display)
    testCase = testCaseClass(stubQuestion, testDict)

    if printTestCase:
        printTest(testDict, solutionDict)

    # This is a fragile hack to create a stub grades object
    grades = grading.Grades(projectParams.PROJECT_NAME, [(None, 0)])
    testCase.execute(grades, moduleDict, solutionDict)
# returns all the tests you need to run in order to run question
def getDepends(testParser, testRoot, question):
    """Return the list of questions to run for `question`, dependencies first.

    Reads the question's CONFIG file; each entry in its 'depends' field is
    expanded recursively and prepended so prerequisites run before the
    question itself.
    """
    configPath = os.path.join(testRoot, question, 'CONFIG')
    questionDict = testParser.TestParser(configPath).parse()
    runOrder = [question]
    if 'depends' in questionDict:
        for dependency in questionDict['depends'].split():
            # run dependencies first
            runOrder = getDepends(testParser, testRoot, dependency) + runOrder
    return runOrder
# get list of questions to grade
def getTestSubdirs(testParser, testRoot, questionToGrade):
    """Determine which question subdirectories to grade, in order.

    If questionToGrade is given, returns it plus its (transitive)
    dependencies; otherwise uses the 'order' field of the root CONFIG, or
    falls back to a sorted directory listing.
    """
    problemDict = testParser.TestParser(
        os.path.join(testRoot, 'CONFIG')).parse()
    if questionToGrade is not None:
        questions = getDepends(testParser, testRoot, questionToGrade)
        if len(questions) > 1:
            print(('Note: due to dependencies, the following tests will be run: %s' %
                   ' '.join(questions)))
        return questions
    order = problemDict.get('order')
    if order is not None:
        return order.split()
    return sorted(os.listdir(testRoot))
# evaluate student code
def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP,
             edxOutput=False, muteOutput=False, gsOutput=False,
             printTestCase=False, questionToGrade=None, display=None):
    """Grade the student modules in moduleDict against the tests under testRoot.

    Builds one Question object per test subdirectory, attaches each test case
    with a closure that either writes a solution file (generateSolutions) or
    checks against the existing one, then runs the grading machinery and
    returns the total points earned (grades.points).
    NOTE(review): exceptionMap is accepted but never read in this body —
    presumably consumed elsewhere; confirm before removing.
    """
    # imports of testbench code. note that the testClasses import must follow
    # the import of student code due to dependencies
    import testParser
    import testClasses
    # Expose the student modules as globals of this module; the test classes
    # look them up by name.
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    questions = []          # (question name, max points) pairs for Grades
    questionDicts = {}      # question name -> parsed CONFIG dict
    test_subdirs = getTestSubdirs(testParser, testRoot, questionToGrade)
    for q in test_subdirs:
        subdir_path = os.path.join(testRoot, q)
        # skip stray files and hidden directories
        if not os.path.isdir(subdir_path) or q[0] == '.':
            continue

        # create a question object
        questionDict = testParser.TestParser(
            os.path.join(subdir_path, 'CONFIG')).parse()
        questionClass = getattr(testClasses, questionDict['class'])
        question = questionClass(questionDict, display)
        questionDicts[q] = questionDict

        # load test cases into question
        # match files like "foo.test" but not hidden/backup files
        tests = [t for t in os.listdir(
            subdir_path) if re.match('[^#~.].*\.test\Z', t)]
        tests = [re.match('(.*)\.test\Z', t).group(1) for t in tests]
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testDict['test_out_file'] = test_out_file
            testClass = getattr(projectTestClasses, testDict['class'])
            testCase = testClass(question, testDict)

            # makefun freezes testCase/solution_file per iteration so the
            # returned lambda does not late-bind to the loop variables.
            def makefun(testCase, solution_file):
                if generateSolutions:
                    # write solution file to disk
                    return lambda grades: testCase.writeSolution(moduleDict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    testDict = testParser.TestParser(test_file).parse()
                    solutionDict = testParser.TestParser(solution_file).parse()
                    if printTestCase:
                        return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict)
                    else:
                        return lambda grades: testCase.execute(grades, moduleDict, solutionDict)
            question.addTestCase(testCase, makefun(testCase, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)
        # each question becomes a callable attribute of this module so
        # grades.grade can find it by name
        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.getMaxPoints()))

    grades = grading.Grades(projectParams.PROJECT_NAME, questions,
                            gsOutput=gsOutput, edxOutput=edxOutput, muteOutput=muteOutput)
    # prerequisites only apply when grading everything, not a single question
    if questionToGrade == None:
        for q in questionDicts:
            for prereq in questionDicts[q].get('depends', '').split():
                grades.addPrereq(q, prereq)
    grades.grade(sys.modules[__name__], bonusPic=projectParams.BONUS_PIC)
    return grades.points
def getDisplay(graphicsByDefault, options=None):
    """Pick a display backend for pacman games.

    Uses graphics when requested (and not suppressed via --no-graphics) and
    the graphics module imports cleanly; otherwise falls back to the no-op
    text display.
    """
    suppressed = options is not None and options.noGraphics
    if graphicsByDefault and not suppressed:
        try:
            import graphicsDisplay
            return graphicsDisplay.PacmanGraphics(1, frameTime=.05)
        except ImportError:
            pass
    import textDisplay
    return textDisplay.NullGraphics()
if __name__ == '__main__':
    # Parse options, load the student code and test classes as modules,
    # then either run one test or grade the whole project.
    options = readCommand(sys.argv)
    if options.generateSolutions:
        confirmGenerate()
    codePaths = options.studentCode.split(',')
    # moduleCodeDict = {}
    # for cp in codePaths:
    #     moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
    #     moduleCodeDict[moduleName] = readFile(cp, root=options.codeRoot)
    # moduleCodeDict['projectTestClasses'] = readFile(options.testCaseCode, root=options.codeRoot)
    # moduleDict = loadModuleDict(moduleCodeDict)

    moduleDict = {}
    # import each student code file under its bare module name
    # (the regex strips any leading directory components and the .py suffix)
    for cp in codePaths:
        moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
        moduleDict[moduleName] = loadModuleFile(
            moduleName, os.path.join(options.codeRoot, cp))
    # the project's test classes are always registered as 'projectTestClasses'
    moduleName = re.match('.*?([^/]*)\.py', options.testCaseCode).group(1)
    moduleDict['projectTestClasses'] = loadModuleFile(
        moduleName, os.path.join(options.codeRoot, options.testCaseCode))

    if options.runTest != None:
        runTest(options.runTest, moduleDict, printTestCase=options.printTestCase,
                display=getDisplay(True, options))
    else:
        evaluate(options.generateSolutions, options.testRoot, moduleDict,
                 gsOutput=options.gsOutput,
                 edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
                 questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion != None, options))
# crawler.py
# ----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
#!/usr/bin/python
import math
from math import pi as PI
import time
import environment
import random
class CrawlingRobotEnvironment(environment.Environment):
    """Discrete environment wrapper around a CrawlingRobot.

    A state is a (armBucket, handBucket) pair of indices into evenly spaced
    angle tables; each action moves one joint a single bucket up or down.
    """

    def __init__(self, crawlingRobot):
        # The robot this environment drives; it owns the actual angles and
        # the on-screen position used to compute rewards.
        self.crawlingRobot = crawlingRobot

        # The state is of the form (armAngle, handAngle)
        # where the angles are bucket numbers, not actual
        # degree measurements
        self.state = None

        # number of discrete buckets per joint
        self.nArmStates = 9
        self.nHandStates = 13

        # create a list of arm buckets and hand buckets to
        # discretize the state space
        minArmAngle,maxArmAngle = self.crawlingRobot.getMinAndMaxArmAngles()
        minHandAngle,maxHandAngle = self.crawlingRobot.getMinAndMaxHandAngles()
        armIncrement = (maxArmAngle - minArmAngle) / (self.nArmStates-1)
        handIncrement = (maxHandAngle - minHandAngle) / (self.nHandStates-1)
        # bucket index -> angle lookup tables, evenly spaced and inclusive of
        # both range endpoints
        self.armBuckets = [minArmAngle+(armIncrement*i) \
                           for i in range(self.nArmStates)]
        self.handBuckets = [minHandAngle+(handIncrement*i) \
                            for i in range(self.nHandStates)]

        # Reset
        self.reset()

    def getCurrentState(self):
        """
        Return the current state
        of the crawling robot
        """
        return self.state

    def getPossibleActions(self, state):
        """
        Returns possible actions
        for the states in the
        current state
        """
        # Each joint may move one bucket in either direction, except at the
        # ends of its range.
        actions = list()

        currArmBucket,currHandBucket = state
        if currArmBucket > 0: actions.append('arm-down')
        if currArmBucket < self.nArmStates-1: actions.append('arm-up')
        if currHandBucket > 0: actions.append('hand-down')
        if currHandBucket < self.nHandStates-1: actions.append('hand-up')

        return actions

    def doAction(self, action):
        """
        Perform the action and update
        the current state of the Environment
        and return the reward for the
        current state, the next state
        and the taken action.

        Returns:
          nextState, reward
        """
        # NOTE(review): if action is not one of the four known strings,
        # nextState stays None and becomes the new state — callers are
        # expected to pass only actions from getPossibleActions().
        nextState, reward = None, None

        oldX,oldY = self.crawlingRobot.getRobotPosition()

        armBucket,handBucket = self.state
        armAngle,handAngle = self.crawlingRobot.getAngles()

        if action == 'arm-up':
            newArmAngle = self.armBuckets[armBucket+1]
            self.crawlingRobot.moveArm(newArmAngle)
            nextState = (armBucket+1,handBucket)
        if action == 'arm-down':
            newArmAngle = self.armBuckets[armBucket-1]
            self.crawlingRobot.moveArm(newArmAngle)
            nextState = (armBucket-1,handBucket)
        if action == 'hand-up':
            newHandAngle = self.handBuckets[handBucket+1]
            self.crawlingRobot.moveHand(newHandAngle)
            nextState = (armBucket,handBucket+1)
        if action == 'hand-down':
            newHandAngle = self.handBuckets[handBucket-1]
            self.crawlingRobot.moveHand(newHandAngle)
            nextState = (armBucket,handBucket-1)

        newX,newY = self.crawlingRobot.getRobotPosition()

        # a simple reward function: horizontal displacement this step
        reward = newX - oldX

        self.state = nextState
        return nextState, reward

    def reset(self):
        """
        Resets the Environment to the initial state
        """
        ## Initialize the state to be the middle
        ## value for each parameter e.g. if there are 13 and 19
        ## buckets for the arm and hand parameters, then the intial
        ## state should be (6,9)
        ##
        ## Also call self.crawlingRobot.setAngles()
        ## to the initial arm and hand angle

        # integer division picks the midpoint bucket of each joint's range
        armState = self.nArmStates//2
        handState = self.nHandStates//2
        self.state = armState,handState
        self.crawlingRobot.setAngles(self.armBuckets[armState],self.handBuckets[handState])
        # positions holds [baseline x, current x] — presumably consumed by the
        # robot's drawing code; confirm against the CrawlingRobot class
        self.crawlingRobot.positions = [20,self.crawlingRobot.getRobotPosition()[0]]
class CrawlingRobot:
def setAngles(self, armAngle, handAngle):
"""
set the robot's arm and hand angles
to the passed in values
"""
self.armAngle = armAngle
self.handAngle = handAngle
def getAngles(self):
"""
returns the pair of (armAngle, handAngle)
"""
return self.armAngle, self.handAngle
def getRobotPosition(self):
"""