Commit 764bcb86 authored by Michael Ritzert's avatar Michael Ritzert
Browse files

Import the new scripts from version 191 of the tools.

parent 8057f8af
# Flowkit v18.10-p005_1
# Flowkit v19.10-s014_1
################################################################################
# This file contains 'create_flow_step' content for steps which are required
# in an implementation flow, but whose contents are specific. Review all
......@@ -7,6 +7,14 @@
# using the 'flow_config.tcl' file.
################################################################################
##############################################################################
# STEP set_dont_use
##############################################################################
create_flow_step -name set_dont_use -owner design {
#- disable base_cell usage during optimization
<%? {dont_use_cells} return "foreach base_cell_name [list [get_flow_config dont_use_cells]] {set_db \[get_db base_cells \$base_cell_name\] .dont_use true}" %>
}
##############################################################################
# STEP read_hdl
##############################################################################
......@@ -65,14 +73,6 @@ create_flow_step -name read_hdl -owner design {
#read_tcf -tcf_instance DUT ../top/verification/xmsim.tcf
}
##############################################################################
# STEP set_dont_use
##############################################################################
create_flow_step -name set_dont_use -owner design {
#- disable cell usage
<%? {dont_use_cells} return "foreach cell [list [get_flow_config dont_use_cells]] { set_db \[get_db base_cells \$cell\] .dont_use true }" %>
}
##############################################################################
# STEP init_floorplan
##############################################################################
......@@ -82,10 +82,7 @@ create_flow_step -name init_floorplan -owner design {
<%? {init_def_files} return "foreach def_file [list [get_flow_config init_def_files]] { read_def \$def_file }" %>
#- update power_intent after floorplan additions
<%? {commit_power_intent_options} return "commit_power_intent [get_flow_config commit_power_intent_options]" else return "commit_power_intent" %>
#- finish floorplan with auto-blockage insertion
finish_floorplan -fill_place_blockage soft 20.0
commit_power_intent
} -check {
foreach file [get_flow_config -quiet init_floorplan_file] {
check "[file exists $file] && [file readable $file]" "The floorplan file: $file was not found or is not readable."
......
# Flowkit v18.10-p005_1
# Flowkit v19.10-s014_1
################################################################################
# This file contains 'create_flow_step' content for steps which are required
# in an implementation flow, but whose contents are specific. Review all
......@@ -7,6 +7,14 @@
# using the 'flow_config.tcl' file.
################################################################################
##############################################################################
# STEP set_dont_use
##############################################################################
create_flow_step -name set_dont_use -owner design {
#- disable base_cell usage during optimization
<%? {dont_use_cells} return "foreach base_cell_name [list [get_flow_config dont_use_cells]] {set_db \[get_db base_cells \$base_cell_name\] .dont_use true}" %>
}
##############################################################################
# STEP read_hdl
##############################################################################
......@@ -16,14 +24,6 @@ create_flow_step -name read_hdl -owner design {
elaborate < PLACEHOLDER: ELABORATION OPTIONS >
}
##############################################################################
# STEP set_dont_use
##############################################################################
create_flow_step -name set_dont_use -owner design {
#- disable cell usage
<%? {dont_use_cells} return "foreach cell [list [get_flow_config dont_use_cells]] { set_db \[get_db base_cells \$cell\] .dont_use true }" %>
}
##############################################################################
# STEP init_floorplan
##############################################################################
......@@ -33,10 +33,7 @@ create_flow_step -name init_floorplan -owner design {
<%? {init_def_files} return "foreach def_file [list [get_flow_config init_def_files]] { read_def \$def_file }" %>
#- update power_intent after floorplan additions
<%? {commit_power_intent_options} return "commit_power_intent [get_flow_config commit_power_intent_options]" else return "commit_power_intent" %>
#- finish floorplan with auto-blockage insertion
finish_floorplan -fill_place_blockage soft 20.0
commit_power_intent
} -check {
foreach file [get_flow_config -quiet init_floorplan_file] {
check "[file exists $file] && [file readable $file]" "The floorplan file: $file was not found or is not readable."
......
#!/usr/bin/env python
###############################################################################
# Copyright (C) 2019 Cadence Design Systems, Inc. #
# All Rights Reserved. #
# CCRNI-0013 #
# #
# This script is AEWare, provided as an example of how to perform specialized #
# tasks within SoC Encounter. It is not supported via the Cadence Hotline #
# nor the CCR system. #
# #
# This work is Cadence intellectual property and may under no circumstances #
# be given to third parties, neither in original nor in modified versions, #
# without explicit written permission from Cadence #
# #
# The information contained herein is the proprietary and confidential #
# information of Cadence or its licensors, and is supplied subject to, and #
# may be used only by Cadence's customers in accordance with, a previously #
# executed license and maintenance agreement between Cadence and its #
# customer. #
###############################################################################
#
# ============================================================================
# =
# = A distribution script to use with flowtool
# =
# ============================================================================
#
###############################################################################
import signal
import sys
import os
import getpass
import subprocess
import re
import datetime
import time
import logging
###############################################################################
# DIST CONFIGURATION
###############################################################################
def _flow_setting(key):
    """Resolve one job setting ("dist", "threads" or "mem") by precedence:
    exact flow path, then top-level flow name, then the basename of the log
    prefix, and finally the None (default) entry of the flow table.

    Relies on the module globals flow, flow_name, flow_top and flow_prefix,
    so it must only be called after configure_output() and after the flow
    table has been built.
    """
    for candidate in (flow_name, flow_top, os.path.basename(flow_prefix)):
        if candidate in flow and key in flow[candidate]:
            return flow[candidate][key]
    return flow[None][key]


def configure_dist():
    """Build the per-step job table and the distribution-method table, then
    derive flow_dist, flow_threads and flow_mem for the current flow step.

    Side effects: sets the module globals flow, dist_config, flow_dist,
    flow_threads, flow_mem and exports FLOWTOOL_NUM_CPUS into the
    environment. Must be called after configure_output().
    """
    global flow
    # The None entry holds the defaults; named entries override single keys.
    flow = {
        None: {
            "dist": "lsf",
            "threads": 1,
            "mem": 10000
        },
        "syn_generic": {"threads": 2},
        "syn_map": {"threads": 2},
        "syn_opt": {"threads": 2},
        "prects": {"threads": 2},
        "cts": {"threads": 2},
        "postcts": {"threads": 2},
        "route": {"threads": 2, "mem": 15000},
        "postroute": {"threads": 2, "mem": 15000},
        "sta": {"threads": 2},
        "report_postroute.route": {"threads": 2},
        "report_postroute.postroute": {"threads": 2}
    }
    global dist_config
    dist_config = {
        "local": {},
        "lsf": {
            "cmd": "< PLACEHOLDER: DIST EXECUTABLE >",
            "queue": "< PLACEHOLDER: DIST QUEUE STRING >",
            "args": "< PLACEHOLDER: DIST ARGS STRING >",
            "resource": "< PLACEHOLDER: DIST RESOURCE STRING >"
        }
    }
    ###########################################################################
    # FLOW DEPENDENT JOB CONFIGURATION
    ###########################################################################
    global flow_dist, flow_threads, flow_mem
    # One shared precedence chain instead of three copy-pasted if/elif ladders.
    flow_dist = _flow_setting("dist")
    flow_threads = _flow_setting("threads")
    flow_mem = _flow_setting("mem")
    os.environ["FLOWTOOL_NUM_CPUS"] = str(flow_threads)
    ###########################################################################
    # MAKE FLOWTOOL RUN LOCALLY
    ###########################################################################
    # flowtool itself is just the flow driver; never send it to the farm.
    if tool == "flowtool":
        flow_dist = "local"
###############################################################################
# OUTPUT CONFIGURATION
###############################################################################
def configure_output():
    """Read the FLOWTOOL_* environment variables to identify the current flow
    step, choose the dist log file, and wire up file plus console logging.

    Side effects: sets the module globals debug, email_addr, is_trunk,
    is_interactive, tool, flow_name, flow_child_of, flow_path, flow_top,
    flow_prefix, flow_log, console and flow_run, and configures the root
    logger (DEBUG to file, INFO to stdout).
    """
    global debug, email_addr, is_trunk, is_interactive, tool
    debug = False      # when True, keep child stdout in output.<path>.log
    email_addr = None  # set to an address to get LSF failure reports mailed
    is_trunk = tclBool(os.environ.get("FLOWTOOL_IS_TRUNK", "false"))
    is_interactive = tclBool(os.environ.get("FLOWTOOL_INTERACTIVE", "false"))
    tool = os.environ.get("FLOWTOOL_TOOL", "")

    global flow_name, flow_child_of, flow_path, flow_top
    flow_name = os.environ.get("FLOWTOOL_FLOW_PATH", "").replace(" ", ".")
    flow_top = os.environ.get("FLOWTOOL_FLOW", "").replace("flow:", "")
    parents = os.environ.get("FLOWTOOL_FLOW_CHILD_OF", "").split()
    if parents:
        # Qualify the step name with its nearest parent flow.
        flow_child_of = parents[-1].replace("flow:", "")
        flow_path = flow_child_of + "." + flow_name
    else:
        flow_child_of = ""
        flow_path = flow_name

    global flow_prefix, flow_log, console
    flow_prefix = os.environ.get("FLOWTOOL_LOG_PREFIX", "")
    if os.path.isdir(flow_prefix):
        dist_log = flow_prefix + "/dist.log"
    elif not flow_prefix:
        dist_log = "dist.log"
    else:
        dist_log = flow_prefix + ".dist"
    if os.path.isfile(dist_log):
        dist_log = unique_filename(dist_log)

    # File log captures everything at DEBUG level.
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        filename=dist_log,
                        filemode='w')
    # Console handler mirrors INFO and above onto stdout.
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(' DIST: %(levelname)-8s %(message)s'))
    logging.getLogger('').addHandler(console)

    # Write out messages somewhere useful; flowtool does not save stdout by
    # default (it repeats the output in debug mode).
    flow_log = "output." + flow_path + ".log" if debug else "/dev/null"

    global flow_run
    run_dir = os.path.basename(os.path.abspath(os.environ.get("FLOWTOOL_DIR", ".")))
    flow_run = "{}-{}".format(run_dir, flow_path)
def unique_filename(basename):
    """Return basename itself if no such file exists, otherwise the first
    name of the form basename<n> (n = 1, 2, ...) that is not an existing
    file."""
    candidate = basename
    suffix = 0
    while os.path.isfile(candidate):
        suffix += 1
        candidate = "{}{}".format(basename, suffix)
    return candidate
###############################################################################
# LOCAL DISTRIBUTION
###############################################################################
###############################################################################
# LOCAL DISTRIBUTION
###############################################################################
class DistLocal():
    """Run the tool command as a local child process.

    Base class for other distribution methods (e.g. DistLSF), which reuse
    run() to launch their submission command.
    """
    command = None  # argv list to execute
    process = None  # Popen handle while the child is alive
    out = None      # captured stdout text (non-interactive runs only)
    error = None    # captured stderr text, if any

    def __init__(self, command):
        self.command = command

    def alterpgid(self):
        # Alter the process group ID so that we don't automatically receive
        # Ctrl-C in the child process; we can then pass it on manually. This
        # is useful in farm distribution methods as we can then call a kill
        # command.
        pid = os.getpid()
        os.setpgid(pid, 0)

    def run(self):
        """Launch self.command and return the child's exit status."""
        logging.debug("FLOW: " + flow_top)
        logging.debug("FLOW name: " + flow_name)
        logging.debug("FLOW child: " + flow_child_of)
        logging.debug("FLOW prefix: " + os.path.basename(flow_prefix))
        logging.debug("DIST method: " + flow_dist)
        logging.debug("DIST cmd: " + (" ".join(self.command)))
        if is_interactive:
            # Inherit the parent's terminal so the user can drive the tool.
            self.process = subprocess.Popen(self.command, preexec_fn=self.alterpgid,
                                            stdout=sys.stdout, stderr=sys.stderr,
                                            stdin=sys.stdin)
        else:
            # Capture stdout (stderr merged into it) for later inspection.
            self.process = subprocess.Popen(self.command, preexec_fn=self.alterpgid,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.STDOUT)
            outs, errs = self.process.communicate()
            self.out = outs.decode('utf-8')
            if errs:
                self.error = errs.decode('utf-8')
        # BUG FIX: the original used poll() here, which returns None while the
        # child is still running -- interactive runs were never waited on and
        # reported a bogus status. wait() blocks until termination and returns
        # immediately after communicate(), so both paths get a real exit code.
        returncode = self.process.wait()
        logging.debug("DIST cmd return: {}".format(returncode))
        if not is_interactive:
            logging.debug("DIST cmd std.err: {}".format(self.error))
        if flow_dist != "local":
            logging.debug("DIST cmd std.out: {}\n".format(self.out))
        if returncode != 0:
            logging.error("DIST: Process exited abnormally")
        self.process = None
        return returncode

    def kill(self, sig):
        """Forward signal sig to the child, if one is running."""
        if self.process is not None:
            self.process.send_signal(sig)
###############################################################################
# LSF DISTRIBUTION
###############################################################################
###############################################################################
# LSF DISTRIBUTION
###############################################################################
class DistLSF(DistLocal):
    """Submit the tool command through LSF (bsub) and, for non-interactive
    runs, poll bjobs until the job leaves the queue."""
    lsf_exec = None   # path to the submission executable (dist_config["lsf"]["cmd"])
    lsf_queue = None  # queue selector arguments, split into a list
    jobid = None      # LSF job id parsed from the bsub submission message

    def __init__(self, command):
        DistLocal.__init__(self, command)
        self.lsf_exec = dist_config["lsf"]["cmd"]
        self.lsf_queue = dist_config["lsf"]["queue"].split()
        lsf_args = dist_config["lsf"]["args"].split()
        lsf_resource = dist_config["lsf"]["resource"]
        # Wrap the tool command in a bsub invocation carrying the thread count,
        # job name, resource/memory requirement and queue from the config.
        lsf_cmd = [self.lsf_exec, "-n", str(flow_threads), "-J", flow_run, "-R", lsf_resource + " rusage[mem=" + str(flow_mem) + "]"] + lsf_args + ["-q"] + self.lsf_queue
        if is_interactive:
            # -Is: interactive submission attached to the current terminal.
            self.command = lsf_cmd + ["-Is"] + self.command
        else:
            # -o: send the job's output to flow_log (/dev/null unless debug).
            self.command = lsf_cmd + ["-o", flow_log] + self.command

    def format_bjobs_time(self, secondstr):
        """Format a bjobs run_time field (e.g. "123 second(s)") as
        DD:HH:MM:SS; return the input unchanged if it is not numeric."""
        seconds = secondstr.split()
        if len(seconds) > 0 and seconds[0].isnumeric():
            seconds = int(seconds[0]);
            days = seconds//86400;
            seconds %= 86400;
            hours = seconds//3600;
            seconds %= 3600;
            minutes = seconds//60;
            seconds %= 60;
            return "{0:0>2}:{1:0>2}:{2:0>2}:{3:0>2}".format(days, hours, minutes, seconds)
        return secondstr

    def run(self):
        """Submit the job and, unless interactive, block while polling bjobs.
        Returns 0 on DONE, 1 on EXIT/UNKNWN or if no job id was found."""
        code = DistLocal.run(self)
        if is_interactive or code != 0:
            return code
        # we are running non-interactive, poll bjobs to detect when job finishes
        if re.match(r".*Job <([0-9]+)> is submitted to queue.*", self.out):
            self.jobid = re.sub(r".*Job <([0-9]+)> is submitted to queue.*", r"\1", self.out).strip()
            logging.debug("DIST job: " + self.jobid)
            running = True
            # Poll interval grows by poll_incr each iteration, capped at
            # poll_limit seconds (3 minutes).
            poll_limit = 3 * 60
            poll_incr = 10
            poll_time = 0
            # bjobs is assumed to live next to the submission executable.
            bjobs_exec = os.path.join(os.path.dirname(self.lsf_exec), "bjobs")
            bjobs_command = [bjobs_exec, "-noheader", "-o", 'id stat name run_time mem delimiter="^"', "-q"] + self.lsf_queue
            bjobs_command += [self.jobid]
            logging.debug("DIST poll: " + (" ".join(bjobs_command)))
            while running:
                bjobs_process = subprocess.Popen(bjobs_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                bjobs_output, bjobs_err = bjobs_process.communicate()
                bjobs_output = bjobs_output.decode('utf-8').strip()
                # Fields come back "^"-separated: id, stat, name, run_time, mem.
                bjobs_list = bjobs_output.split("^")
                logging.info("{0} {1[0]:>5} {1[1]:>5} {2:>11} {1[4]:>10} {1[2]}".format(datetime.datetime.now().strftime("%H:%M:%S"), bjobs_list, self.format_bjobs_time(bjobs_list[3])))
                console.flush()
                if bjobs_list[1] == "UNKNWN" or bjobs_list[1] == "EXIT":
                    # Job failed or fell out of LSF's view; optionally mail
                    # the bhist record before reporting failure.
                    running = False
                    if email_addr is not None:
                        bhist = subprocess.Popen([os.path.join(os.path.dirname(self.lsf_exec), "bhist"), "-q"] + self.lsf_queue + ["-l", self.jobid], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                        bhist_out, bhist_err = bhist.communicate()
                        mail = subprocess.Popen(["mail", "-s", flow_run + "-" + bjobs_list[1], email_addr], stdin=subprocess.PIPE, universal_newlines=True)
                        mail.communicate(bhist_out.decode('utf-8'))
                    return 1
                elif bjobs_list[1] == "DONE":
                    running = False
                else:
                    running = True
                if running:
                    if poll_time < poll_limit:
                        poll_time += poll_incr
                    time.sleep(poll_time)
            return 0
        else:
            logging.error("DIST failed to find job ID in message: " + self.out)
            return 1

    def kill(self, sig):
        """Kill the submission process and, for batch runs, bkill the LSF job."""
        DistLocal.kill(self, sig)
        if not is_interactive and self.jobid is not None:
            logging.debug("DIST kill: " + self.jobid)
            subprocess.call([os.path.join(os.path.dirname(self.lsf_exec), "bkill"), "-q"] + self.lsf_queue + [self.jobid])
###############################################################################
# HELPER FUNCTIONS
###############################################################################
def tclBool(value):
    """Convert a Tcl boolean string to a Python bool.

    Accepts the full set of values Tcl's Tcl_GetBoolean recognizes --
    "1"/"0", "true"/"false", "yes"/"no", "on"/"off" -- case-insensitively
    (Tcl booleans are case-insensitive). Backward compatible with the
    previous strict lower-case 1/0/true/false/yes/no set.

    Raises:
        Exception: if value is not a recognized Tcl boolean.
    """
    lowered = value.lower()
    if lowered in ("1", "true", "yes", "on"):
        return True
    if lowered in ("0", "false", "no", "off"):
        return False
    raise Exception("Bad tcl boolean value: " + value)
def main(argv):
    """Entry point: configure logging and distribution, run the wrapped tool
    command (argv[1:]) through the selected distribution method, and exit
    with its status."""
    global dist
    # BUG FIX: define dist before installing the SIGINT handler -- a Ctrl-C
    # arriving before the job object was created made sigintHandler hit a
    # NameError instead of exiting cleanly.
    dist = None
    signal.signal(signal.SIGINT, sigintHandler)
    configure_output()
    configure_dist()
    argv.pop(0)  # drop this script's own name; the rest is the tool command
    if flow_dist == "lsf":
        dist = DistLSF(argv)
    else:
        dist = DistLocal(argv)
    exitstatus = dist.run()
    logging.debug("DIST return: " + str(exitstatus))
    logging.shutdown()
    # sys.exit instead of the site-module exit(): always available in scripts.
    sys.exit(exitstatus)
def sigintHandler(sig, frame):
    """SIGINT handler: forward the interrupt to the running job object, or
    exit immediately if no job has been created yet."""
    if dist is not None:
        dist.kill(sig)
    else:
        exit(1)
# Script entry point: hand the full argv to main() (argv[0] is popped there).
if __name__ == "__main__":
    main(sys.argv)
# Flowkit v18.10-p005_1
# Flowkit v19.10-s014_1
################################################################################
# ECO Definition
################################################################################
......
This diff is collapsed.
%TAG ! tag:flow.stylus.cadence.com,0.1:
---
#############################################################################################
# Flow Setup
#############################################################################################
# Generated using: Flowkit v19.10-s014_1
# Command: write_flow_template -type stylus -tools { genus innovus ext confrml ssv tempus } -enable_feature { report_lec sta_glitch }
# Provide a memorable comment to describe this yaml file
remarks:
# Specify scripts that will be automatically sourced in all tools
# these scripts will be run every time each tool initializes
include_scripts:
# Specify scripts that define procs
# these are available during flow_step execution
include_procs:
#############################################################################################
# Feature Definition
#############################################################################################
# +-------------+--------------------------------------------------+----------+
# | Feature | Description | Value |
# +-------------+--------------------------------------------------+----------+
# | -report_lec | Add LEC dofile generation and checks to the flow | enabled |
# | -sta_glitch | Add glitch analysis reports to STA flow | enabled |
# +-------------+--------------------------------------------------+----------+
define_feature:
# Optional features in the above table can be enabled below by adding them as space separated options
# eg: features: -report_inline -report_lec
features:
#############################################################################################
# Flow Step Definitions
#
# default_flow_steps : Flow steps that come from the tool templates (write_flow_template)
# DO NOT MODIFY
# tech_flow_steps : Technology dependent flow steps that are required to place, route and
# optimize a DRC clean design
# ip_flow_steps : Flow steps for integrating IP
# user_flow_steps : Flow steps which require user modifications to replace PLACEHOLDER
# content and fine tune the flow for specific needs
#
#############################################################################################
default_flow_steps:
- flow/common_flows.tcl
- flow/common_steps.tcl
- flow/genus_steps.tcl
- flow/innovus_steps.tcl
- flow/tempus_steps.tcl
tech_flow_steps:
ip_flow_steps:
user_flow_steps:
- design_config.tcl
- eco_config.tcl
- flow_config.tcl
- genus_config.tcl
- innovus_config.tcl
- tempus_config.tcl
# Specify the flows that will be run, in order
flow_current: synthesis implementation
#############################################################################################
# General Flow Configuration
#############################################################################################
# Identify the simple design data yaml file to be used
design_yaml: setup.yaml
#############################################################################################
# Flow Definitions
#
# Common modifications are:
# 1. comment out a step by adding in a '#', eg # - step_does_not_run:
# 2. delete a line to remove the step
# 3. reorder defined flow_steps and flows
# 4. add a single command inline, eg # CMD_<step_name>: <cmd>
# 5. add file contents inline, eg # FILE_<step_name>: <filename>
# 6. change the parameters, see the comment at the end of each line
#############################################################################################
flows:
#-------------------------------------------------------------------------------
# synthesis
#-------------------------------------------------------------------------------
synthesis:
args: -tool genus -owner cadence -skip_metric -tool_options -disable_user_startup
steps:
- syn_generic:
features: #(opt) -setup_views <val> list of setup analysis_views to activate; (opt) -hold_views <val> list of hold analysis_views to activate; (opt) -dynamic_view <val> single dynamic analysis_view to activate; (opt) -leakage_view <val> single leakage analysis_view to activate
steps:
- block_start:
- init_elaborate:
- init_design:
- read_mmmc:
- read_physical:
- read_hdl:
- read_power_intent:
- run_init_design:
- commit_power_intent:
- init_genus:
- set_dont_use:
- create_cost_group:
- run_syn_generic:
- block_finish:
- SCHEDULE: -flow report_synth -include_in_metrics
- syn_map:
features: #(opt) -setup_views <val> list of setup analysis_views to activate; (opt) -hold_views <val> list of hold analysis_views to activate; (opt) -dynamic_view <val> single dynamic analysis_view to activate; (opt) -leakage_view <val> single leakage analysis_view to activate
steps:
- block_start:
- init_genus:
- run_syn_map:
- block_finish:
- SCHEDULE: -flow report_synth -include_in_metrics
- genus_to_lec:
- syn_opt:
features: #(opt) -setup_views <val> list of setup analysis_views to activate; (opt) -hold_views <val> list of hold analysis_views to activate; (opt) -dynamic_view <val> single dynamic analysis_view to activate; (opt) -leakage_view <val> single leakage analysis_view to activate
steps:
- block_start:
- init_genus:
- run_syn_opt:
- block_finish:
- SCHEDULE: -flow report_synth -include_in_metrics
- genus_to_lec:
- genus_to_innovus:
#-------------------------------------------------------------------------------
# implementation
#-------------------------------------------------------------------------------
implementation:
args: -tool innovus -owner cadence -skip_metric -tool_options -disable_user_startup
steps:
- floorplan:
features: #(opt) -setup_views <val> list of setup analysis_views to activate; (opt) -hold_views <val> list of hold analysis_views to activate; (opt) -dynamic_view <val> single dynamic analysis_view to activate; (opt) -leakage_view <val> single leakage analysis_view to activate
steps:
- block_start:
- init_innovus:
- init_floorplan:
- add_tracks:
- block_finish:
- SCHEDULE: -flow report_floorplan -include_in_metrics
- innovus_to_lec:
- prects:
features: #(opt) -setup_views <val> list of setup analysis_views to activate; (opt) -hold_views <val> list of hold analysis_views to activate; (opt) -dynamic_view <val> single dynamic analysis_view to activate; (opt) -leakage_view <val> single leakage analysis_view to activate
steps: