Run the solver and return the next solution (if any)

Return null if one of these is true:

  • steps_limit is reached
  • the todo queue is empty (e.g. no reachable solution)

Property definitions

ai $ SearchSolver :: run
	# Run the solver and return the next solution (if any).
	#
	# Return null if one of these is true:
	# * `steps_limit` is reached
	# * the `todo` queue is empty (e.g. no reachable solution)
	fun run: nullable SearchNode[S,A]
	do
		# Lazily initialize the search on the first call
		if steps == 0 then start

		var nearest = nearest_solution
		loop
			# Enough work: stop when the step budget is exhausted (0 means no limit)
			if steps_limit > 0 and steps >= steps_limit then break

			#print "todo={todo.length}"
			#print "  {todo.join("\n  ")}"

			# Next node, please
			if todo.is_empty then
				# Nothing left to explore.
				# Try iterative deepening, but only if enabled and if at
				# least one node was cut short by the depth limit.
				if depth_limit <= 0 or iterative_deepening <= 0 or depth_limit_reached == 0 then
					is_running = false
					break
				end

				# Relax the depth limit and restart the search (`start`)
				depth_limit += iterative_deepening
				start
			end
			var node = todo.take

			# Skip `old` stuff
			# Because `Queue` has no remove :(
			# Nodes superseded by a better path are only flagged with `drop`
			# and lazily discarded here.
			if node.drop then continue

			var state = node.state

			if memorize and memorize_late then
				# Late memorization: the memory is checked now, at
				# expansion time, instead of at insertion time.
				# Is the state already visited?
				var old = memory.get_or_null(state)
				if old != null then
					memorized += 1
					# Not significantly cheaper than the known node? Skip it.
					if old.cost - node.cost < problem.epsilon then continue
					revisits += 1
					if not do_revisit then
						old.revisits += 1
						continue
					end
					node.revisits = old.revisits + 1
				end
				memory[state] = node
			end

			# The node is really expanded: count it and stamp its visit order
			steps += 1
			assert node.steps == 0
			node.steps = steps
			self.node = node

			# Keep trace to the nearest node seen so far (smallest heuristic)
			if nearest == null or node.heuristic < nearest.heuristic then
				nearest = node
				nearest_solution = node
			end

			#print "try {node}"
			#print "try {node}; todo={todo.length}"

			# Won?
			if problem.is_goal(state) then
				solution = node
				return node
			end

			# Ignore successors states if the depth limit is reached
			if depth_limit > 0 and node.depth >= depth_limit then
				depth_limit_reached += 1
				continue
			end

			# Now, expand!
			var actions = problem.actions(state)
			if actions == null then continue
			for action in actions do
				neighbors += 1

				# Fast track if no memory or late memory:
				# enqueue the successor directly, without consulting the memory.
				if not memorize or memorize_late then
					var new_node = node.apply_action(action)
					new_node.id = nodes
					nodes += 1
					todo.add(new_node)
					continue
				end

				# Get the state and the cost. Do not create the node yet.
				var new_state = problem.apply_action(state, action)
				var new_cost = node.cost + problem.cost(state, action, new_state)

				# So check if the state was already seen
				var old = memory.get_or_null(new_state)
				if old != null then
					memorized += 1
					# If not better, then skip
					if old.cost - new_cost < problem.epsilon then continue
					# If visited and do not revisit, then skip
					if old.steps > 0 and not do_revisit then
						old.revisits += 1
						revisits += 1
						continue
					end
					# Even if `==`, reuse the same state object so
					# * it may help the GC
					# * user-cached things in the previous state can be reused
					new_state = old.state
				end

				# Finally, create the node
				var new_node = new SearchNode[S, A](problem, new_state, node, action, new_cost, node.depth+1)
				new_node.id = nodes
				nodes += 1

				if old == null then
					# First time this state is seen: compute its heuristic
					new_node.compute_heuristic
				else
					# Reuse the old heuristic and update the score with the new cost
					var h = old.heuristic
					new_node.heuristic = h
					new_node.score = new_cost + h

					# Is `old` a visited node?
					if old.steps == 0 then
						# Old is still in the todo list, so flag it to be dropped
						old.drop = true
					else
						# Old was visited, so count a revisit
						new_node.revisits = old.revisits + 1
						revisits += 1
						#print "found {old.cost}>{new_cost}:{old.cost>new_cost} d={old.cost-new_cost}\n\t{old}\nthat is worse than\n\t{new_node}"
					end
				end
				memory[new_state] = new_node

				todo.add(new_node)
			end
		end
		# Budget exhausted or search space fully explored: no solution this time
		return null
	end
lib/ai/search.nit:386,2--531,4