aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'ebuildgen/filetypes')
-rw-r--r--ebuildgen/filetypes/__init__.py0
-rw-r--r--ebuildgen/filetypes/acif.py145
-rw-r--r--ebuildgen/filetypes/autoconf.py484
-rw-r--r--ebuildgen/filetypes/automake.py342
-rw-r--r--ebuildgen/filetypes/ctypefiles.py229
-rw-r--r--ebuildgen/filetypes/makefilecom.py396
-rw-r--r--ebuildgen/filetypes/makefiles.py479
7 files changed, 2075 insertions, 0 deletions
diff --git a/ebuildgen/filetypes/__init__.py b/ebuildgen/filetypes/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ebuildgen/filetypes/__init__.py
diff --git a/ebuildgen/filetypes/acif.py b/ebuildgen/filetypes/acif.py
new file mode 100644
index 0000000..1d3ed29
--- /dev/null
+++ b/ebuildgen/filetypes/acif.py
@@ -0,0 +1,145 @@
+from ply import lex
+from ply import yacc
+
def parseif(ifoptions):
    """Parse a shell ``test`` condition taken from a configure script.

    ifoptions -- list of the whitespace-split words of the condition.

    The words are re-joined into one string and run through a small PLY
    lexer/parser.  Returns a flat list of alternating
    [variable_name, required_value, ...] entries, where a value of "!"
    means the variable must be nonzero/defined and a "!"-prefixed value
    means the variable must NOT equal the remainder.
    """
    # Re-join the split words so the lexer sees a single string.
    optstr = ""
    for option in ifoptions:
        optstr += option + " "

    # Token names for the PLY lexer below.
    tokens = (
        "NOT",
        "AND",
        "OR",
        "EQ",
        "NEQ",
        "NONZERO",
        "SEMICOL",
        "LBRAC",
        "RPRAC",  # NOTE(review): presumably a typo for "RBRAC" -- kept for compatibility
        "OPT",
        "TEST",
    )

    def t_TEST(t):
        r"test"
        return t

    def t_AND(t):
        r"(\-a|\&\&)"
        return t

    def t_OR(t):
        r"(\-o|\|\|)"
        return t

    def t_EQ(t):
        r"="
        return t

    def t_NEQ(t):
        r"\!="
        return t

    def t_NOT(t):
        r"\!"
        return t

    def t_NONZERO(t):
        r"\-n"
        return t

    # Semicolons are recognized but produce no token.
    def t_SEMICOL(t):
        r";"
        pass

    def t_LBRAC(t):
        r"\{"
        return t

    def t_RPRAC(t):
        r"\}"
        return t

    # Whitespace is ignored.
    def t_space(t):
        r"[ \t\n]"
        pass

    # Quote characters are dropped; quoting is not tracked.
    def t_quote(t):
        r"[\"\']"
        pass

    # Any other run of non-separator chars is an option/word.
    def t_OPT(t):
        r"[^ \t\n;\"\']+"
        return t

    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0],t.lexer.lineno)
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(optstr)
    #for tok in lexer:
    #    print(tok)

    #YACC
    #Add more cases!

    def p_exp(p):
        """
        exp : NOT TEST expopt
            | TEST expopt
        """
        if len(p) == 4:
            # "! test ..." -- negate every value in the result list.  The
            # list alternates [name, value, name, value, ...]; only the
            # value entries (odd indices) get their "!" toggled.
            newlst = []
            while len(newlst) < len(p[3]):
                if p[3][len(newlst)+1][0] == "!":
                    newresult = p[3][len(newlst)+1][1:]
                else:
                    newresult = "!" + p[3][len(newlst)+1]

                newlst += [p[3][len(newlst)],newresult]

            p[0] = newlst

        else:
            p[0] = p[2]

    def p_expopt(p):
        """
        expopt : expopt AND expopt
               | expopt OR expopt
        """
        # NOTE(review): "-a" (and) and "-o" (or) are currently handled the
        # same way -- both simply concatenate the requirement lists.
        if p[2] == "-a":
            p[0] = p[1] + p[3]
        else: #come up with something better
            p[0] = p[1] + p[3]

    def p_expopt2(p):
        """
        expopt : OPT EQ OPT
               | OPT NEQ OPT
               | NONZERO OPT
               | OPT
        """
        if len(p) == 4:
            if p[2] == "=":
                # x$VAR = xvalue  ->  [VAR, value] (shared literal prefix
                # "x" is stripped from the right-hand side)
                varstr = p[1].split("$")
                p[0] = [varstr[1],p[3][len(varstr[0]):]]
                #[VARIABLEname,value to pass test]

            elif p[2] == "!=":
                varstr = p[1].split("$")
                p[0] = [varstr[1],"!" + p[3][len(varstr[0]):]]

        else:
            # Bare "$VAR" or "-n $VAR": variable just has to be set.
            varstr = p[len(p)-1].split("$")[1]
            p[0] = [varstr, "!"] #req that the variable is nonzero to be True

    def p_error(p):
        # NOTE(review): crashes with AttributeError when p is None (EOF).
        print("syntax error at '%s'" % p.type,p.value)
        pass

    yacc.yacc()
    return yacc.parse(optstr)
+
diff --git a/ebuildgen/filetypes/autoconf.py b/ebuildgen/filetypes/autoconf.py
new file mode 100644
index 0000000..d46d133
--- /dev/null
+++ b/ebuildgen/filetypes/autoconf.py
@@ -0,0 +1,484 @@
+from ply import lex
+from ply import yacc
+
def scanacfile(acfile):
    """Scan a autoconfigure (.in/.ac) file.

    acfile -- string with the file's contents.

    Tokenizes the input with PLY and parses it into a nested list of the
    interesting constructs: macro/function calls (as [name, options]),
    "VAR=..." assignment strings, echo lines, if blocks (as
    [[condition-words], body-list, ...]) and case blocks.
    """

    tokens = (
        "FUNC",
        "COMPFUNC", #complete func
        "FUNCOPT", #func options
        "FUNCEND",
        "VAR",
        "ECHO",
        "TEXT",
        "IF",
        "IFCOM",
        "ELIF",
        "ELSE",
        "THEN",
        "IFEND",
        "CASE",
        "CASEOPT",
        "COPTEND", #case opt end, doesn't need to be there but SHOULD
        "CASEEND",
        "COMMA",
    )

    states = (
        ("func", "inclusive"),
        ("funcopt", "exclusive"),
        ("case", "inclusive"),
        ("if", "inclusive"),
        ("shellcom", "exclusive"),
    )

    # Escaped newline: line continuation, dropped.
    def t_contline(t):
        r"\\\n"
        t.lexer.lineno += 1
        pass

    def t_ANY_space(t):
        r"[ \t]"
        pass

    def t_newline(t):
        r"\n"
        t.lexer.lineno += 1
        pass

    # Shell function definition "name() {": its whole body is skipped by
    # entering the "shellcom" state and counting braces.
    def t_shfunc(t): #shell func
        r'[a-zA-Z_][a-zA-Z0-9_]*\(\)[ \t]*{'
        t.lexer.level = 1
        t.lexer.push_state("shellcom")

    def t_shellcom_text(t):
        r"[^{}]+"

    def t_shellcom_opb(t):
        r"{"
        t.lexer.level +=1

    def t_shellcom_opc(t):
        r"}"
        t.lexer.level -=1

        if t.lexer.level == 0:
            t.lexer.pop_state()
        pass

    # Complete call with a simple argument string, e.g. "AC_INIT(foo)".
    # Token value becomes [name, argument-string].
    def t_COMPFUNC(t):
        r'[a-zA-Z_][a-zA-Z0-9_]*\([^\\[\](\),]*\)'
        values = t.value.split("(")
        t.value = [values[0],values[1][:-1]]
        return t

    # Start of a call whose arguments need further lexing.
    def t_FUNC(t):
        r'[a-zA-Z_][a-zA-Z0-9_]*\('
        t.lexer.push_state('func')
        t.value = t.value[:-1] #return name of func
        return t

    # "[" starts an m4-quoted argument; it is swallowed verbatim until the
    # matching "]" (bracket depth tracked in t.lexer.level).
    def t_func_funcopt(t):
        r'\['
        t.lexer.code_start = t.lexer.lexpos # Record the starting position
        t.lexer.level = 1 # Initial level
        t.lexer.push_state('funcopt') # Enter 'ccode' state

    # Rules for the ccode state
    def t_funcopt_newcom(t):
        r'\['
        t.lexer.level +=1

    def t_funcopt_endcom(t):
        r'\]'
        t.lexer.level -=1

        # If closing command, return the code fragment
        if t.lexer.level == 0:
            t.value = t.lexer.lexdata[t.lexer.code_start-1:t.lexer.lexpos]
            t.type = "FUNCOPT"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.pop_state()
            return t

    def t_funcopt_opt(t):
        r"[^\\\[\]]+"

    def t_funcopt_contline(t):
        r"\\\n"

    def t_func_COMMA(t):
        r","
        return t

    def t_func_FUNCEND(t):
        r"\)"
        t.lexer.pop_state()
        return t

    # "dnl" and "#" comments run to end of line and are dropped.
    def t_comment(t):
        r"(dnl|\#).*\n"
        t.lexer.lineno += t.value.count('\n')
        pass

    def t_ECHO(t):
        r"echo.*\n"
        t.lexer.lineno += t.value.count('\n')
        return t

    def t_VAR(t):
        #take var=text, var="text text", var='text text', var=`text text`
        r"[a-zA-Z_][a-zA-Z0-9_]*=(\"[^\"]*\"|\'[^\']*\'|\`[^\`]*\`|[^() \t,\n]*)+"
        t.lexer.lineno += t.value.count('\n')
        return t

    def t_IF(t):
        r"if"
        t.lexer.push_state("if")
        return t

    def t_ELIF(t):
        r"elif"
        t.lexer.push_state("if")
        return t

    def t_if_THEN(t):
        r"then"
        t.lexer.pop_state()
        return t

    # Every word between if/elif and then is part of the raw condition.
    def t_if_IFCOM(t):
        r"[^ \t\n]+"
        return t

    def t_ELSE(t):
        r"else"
        return t

    def t_IFEND(t):
        r"fi"
        return t

    def t_CASE(t):
        r"case.*in"
        t.lexer.push_state("case")
        return t

    def t_CASEEND(t):
        r"esac"
        t.lexer.pop_state()
        return t

    def t_case_CASEOPT(t):
        r"[^\n\t\(\)]+\)"
        return t

    def t_case_COPTEND(t):
        r";;"
        return t

    # Backslash escape: yield the escaped char as plain TEXT.
    def t_literal(t):
        r"\\[^\n]"
        t.type = "TEXT"
        t.value = t.value[-1] #return litral char
        return t

    def t_TEXT(t): #most likely commands like "AM_INIT_AUTOMAKE" etc.
        #Fix this so I can handle variables like the one above as that is NOT a text string
        r"([^ ;,\t\n\(\)]+|\([^() \t\n]*\))"
        return t

    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0],t.lexer.lineno)
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(acfile)
    #for tok in lexer:
    #    print(tok)

    #YACC stuff begins here

    # Top level: a flat list of parsed items.
    def p_complst(p):
        """
        complst : complst text
                | complst ECHO
                | complst func
                | complst VAR
                | complst ifcomp
                | complst case
                | complst FUNCOPT
                | text
                | ECHO
                | func
                | VAR
                | ifcomp
                | case
                | FUNCOPT
        """
        if len(p) == 3:
            p[0] = p[1] + [p[2]]
        else:
            p[0] = [p[1]]

    # Adjacent TEXT tokens are re-joined with single spaces.
    def p_text(p):
        """
        text : text TEXT
             | TEXT
        """
        if len(p) == 3:
            p[0] = p[1] + " " + p[2]
        else:
            p[0] = p[1]

    def p_case(p):
        """
        case : CASE caseopt CASEEND
        """
        p[0] = [p[1]] + [p[2]]

    def p_caseopt(p):
        """
        caseopt : caseopt CASEOPT complst COPTEND
                | CASEOPT complst COPTEND
        """
        if len(p) == 5:
            p[0] = p[1] + [p[2], p[3]]
        else:
            p[0] = [p[1], p[2]]

    # Case branches that lack a body and/or the trailing ";;".
    def p_caseopt2(p):
        """
        caseopt : caseopt CASEOPT complst
                | caseopt CASEOPT COPTEND
                | CASEOPT complst
                | CASEOPT COPTEND
        """
        if len(p) == 4:
            if isinstance(p[3],list):
                p[0] = p[1] + [p[2], p[3]]
            else:
                p[0] = p[1] + [p[2], []]
        else:
            if isinstance(p[2],list):
                p[0] = [p[1], p[2]]
            else:
                p[0] = [p[1], []]

    def p_ifcomp(p): #perhaps needs elif also
        """
        ifcomp : if IFEND
        """
        p[0] = p[1]

    # if blocks become [[condition-words], body, ...] lists; else/elif
    # branches are appended to the same list.
    def p_if(p):
        """
        if : if ELSE complst
           | IF ifcom THEN complst
           | if ELIF ifcom THEN complst
        """
        if len(p) == 5:
            p[0] = [[p[1]] + [p[2]], p[4]]

        elif len(p) == 6:
            p[0] = p[1] + [[p[2]] + [p[3]], p[5]]

        else:
            p[0] = p[1] + [[p[2]], p[3]]


    def p_ifcom(p):
        """
        ifcom : ifcom IFCOM
              | IFCOM
        """
        if len(p) == 3:
            p[0] = p[1] + [p[2]]
        else:
            p[0] = [p[1]]

    def p_func(p):
        """
        func : FUNC funcopt FUNCEND
             | COMPFUNC
        """
        if len(p) == 2:
            p[0] = p[1] #this is already ordered
        else:
            p[0] = [p[1],p[2]]

    # Commas delimit macro arguments; missing arguments become [].
    def p_funccomma(p):
        """
        funcopt : funcopt COMMA
                | COMMA complst
                | COMMA
        """
        if len(p) == 3:
            if isinstance(p[2],list):
                if len(p[2]) > 1:
                    p[0] = [[]] + [p[2]]
                else:
                    p[0] = [[]] + p[2]

            else:
                p[0] = p[1] + [[]]
        else:
            p[0] = [[]]

    def p_funcopt(p):
        """
        funcopt : funcopt COMMA complst
                | complst
        """
        if len(p) == 4:
            if len(p[3]) > 1:
                p[0] = p[1] + [p[3]]
            else:
                p[0] = p[1] + p[3]
        else:
            if len(p[1]) > 1:
                p[0] = [p[1]]
            else:
                p[0] = p[1]

    def p_error(p):
        # NOTE(review): crashes with AttributeError when p is None (EOF).
        print("syntax error at '%s'" % p.type,p.value)
        pass

    yacc.yacc()

    items = yacc.parse(acfile)
    return items
+
+from ebuildgen.filetypes.acif import parseif
+
def output(inputlst,topdir):
    """Collect configure options and conditionals from a parsed ac file.

    inputlst -- item list produced by scanacfile()
    topdir -- source tree root, used to resolve m4_include files

    Returns (variables, iflst): variables maps option names
    (enable_*/with_*) to [helptext, action] (or a marker dict when the
    macro had no action arguments); iflst collects
    [parsed-if-condition, variable-dict] pairs for if blocks that test a
    known option.
    """
    variables = dict()
    iflst = []
    for item in inputlst:
        if item[0] == "AC_ARG_ENABLE":
            name = convnames(item[1][0])
            if len(item[1]) == 2:
                variables["enable_" + name] = {"AC_ARG_ENABLE" : ""}
            elif len(item[1]) == 3:
                variables["enable_" + name] = [item[1][2],[]]
            else:
                variables["enable_" + name] = [item[1][2],item[1][3]]

            #remember to convert chars in the name of "item[1]" that is not
            #alfanumeric char to underscores _
            #Done with convnames!

        elif item[0] == "AC_ARG_WITH":
            name = convnames(item[1][0])
            if len(item[1]) == 2:
                variables["with_" + name] = {"AC_ARG_WITH" : ""}
            elif len(item[1]) == 3:
                variables["with_" + name] = [item[1][2],[]]
            else:
                variables["with_" + name] = [item[1][2],item[1][3]]
        elif isinstance(item[0],list): #if statements
            # Only keep if blocks whose condition mentions an option we
            # have already seen.
            for variable in variables:
                for pattern in item[0][1]:
                    if variable in pattern:
                        iflst += [[parseif(item[0][1]),ifs(item[1],{})]]

        elif item[0] == "AM_CONDITIONAL":
            # Mark the automake conditional "true" in every recorded if
            # branch whose variables satisfy the condition.
            var = item[1][0].strip("[]")
            cond = parseif(item[1][1].strip("[]").split())
            for if_state in iflst:
                if cond[0] in if_state[1]:
                    if cond[1] == "!" or cond[1] == if_state[1][cond[0]]:
                        #"!" == not zero/defined, "" zero/not defined
                        if_state[1][var] = "true"

        elif item[0] == "m4_include":
            # Recurse into included fragment files.
            newvar,newiflst = output(scanacfile(openfile(topdir + item[1])),topdir)
            variables.update(newvar)
            iflst += newiflst

    #for variable in variables:
        #print(variable)
        #print(variables[variable])
    #print(iflst)
    return variables,iflst
+
def ifs(inputlst,variables):
    """Scan the body of an if branch for checks and assignments.

    inputlst -- parsed item list of the branch body
    variables -- dict to extend (also returned)

    Records AC_CHECK_HEADER(S)/AC_CHECK_LIB/PKG_CHECK_MODULES results as
    ac_cv_* entries, AC_DEFINE values, and plain shell variable
    assignments (with $var references expanded from *variables*).
    """

    for item in inputlst:
        ac_check = 0 #is this an ac_check?
        if item[0] == "AC_CHECK_HEADERS" or item[0] == "AC_CHECK_HEADER":
            ac_check = 1
        elif item[0] == "AC_CHECK_LIB":
            ac_check = 2
        elif item[0] == "PKG_CHECK_MODULES":
            ac_check = 3

        if ac_check:
            if not isinstance(item[1][0],list):
                headers = convnames(item[1][0]).split()
            else:
                headers = []
                for header in item[1][0]:
                    # NOTE(review): "+=" with a str extends the list
                    # character by character; probably meant append().
                    headers += convnames(header)

            for header in headers:
                if ac_check == 1:
                    variables["ac_cv_header_" + header] = "yes"
                if ac_check == 2:
                    variables["ac_cv_lib_" + header] = "yes"

            # Recurse into the action-if-found argument (index differs:
            # arg 3 for lib/pkg checks, arg 2 for header checks).
            if len(item[1]) > 2 and ac_check > 1:
                if isinstance(item[1][2],list):
                    variables.update(ifs(item[1][2], variables))
                else:
                    variables.update(ifs(scanacfile(item[1][2].strip("[]")), variables))
            elif ac_check == 1 and len(item[1]) > 1:
                if isinstance(item[1][1],list):
                    variables.update(ifs(item[1][1], variables))
                else:
                    variables.update(ifs(scanacfile(item[1][1].strip("[]")), variables))

        elif isinstance(item[0],list): #if statement
            variables.update(ifs(item[1],variables))

        elif item[0] == "AC_DEFINE":
            if len(item[1]) == 1:
                variables.update({item[1][0].strip("[]") : "1"})
            else:
                variables.update({item[1][0].strip("[]") : item[1][1]})

        elif "=" in item:
            # Plain shell assignment VAR=value; expand $refs from known
            # variables, keep literal words as-is.
            (var,items) = item.split("=")
            compitems = []
            #Fix "´" aka exec shell commad comments!
            for itm in items.strip('"').strip("'").split():
                if itm[0] == "$":
                    if itm[1:] in variables:
                        compitems += variables[itm[1:]]

                else:
                    compitems += [itm]
            variables[var] = compitems

    return variables
+
+import re
def convnames(string):
    """Normalize an autoconf name for use as a variable key.

    Strips surrounding m4 quote brackets ("[]") and replaces every
    non-alphanumeric character with "_".

    string -- the raw (possibly quoted) name
    Returns the converted string.
    """
    string = string.strip("[]") #remove quotes
    # Raw string: a bare "\W" is an invalid escape sequence and raises a
    # warning (eventually an error) on modern Python.
    return re.sub(r"\W", "_", string)
+
#TODO: rename -- "scanac" is not a descriptive name
def scanac(acfile,topdir):
    """Scan the contents of an autoconf file.

    Returns the (variables, iflst) pair produced by output().
    """
    parsed = scanacfile(acfile)
    return output(parsed, topdir)
+
def openfile(ofile):
    """Return the text of *ofile*, decoded as UTF-8.

    Bytes that cannot be decoded are replaced instead of raising.
    """
    with open(ofile, encoding="utf-8", errors="replace") as handle:
        contents = handle.read()
    return contents
diff --git a/ebuildgen/filetypes/automake.py b/ebuildgen/filetypes/automake.py
new file mode 100644
index 0000000..c4ca432
--- /dev/null
+++ b/ebuildgen/filetypes/automake.py
@@ -0,0 +1,342 @@
+from ply import lex
+from ply import yacc
+import glob
+import os
+
def scanamfile(amfile):
    """Scan automake (.am) file

    amfile -- string with the file's contents.

    Returns a 3-element list:
      [0] dict of "=" assignments {name: [words]},
      [1] list of "+=" additions as [name, [words]] pairs,
      [2] dict of conditional blocks {condition: nested-result,
          "!condition": else-branch result}.
    """
    amfile = "\n" + amfile #Add \n so you can guess vars
    tokens = (
        "END",
        "COL",
        "EQ",
        "PEQ",
        "CVAR",
        "MVAR",
        "TEXT",
        "ENDTAB",
        "SPACE",
        "IF",
        "ELSE",
        "ENDIF",
    )

    states = (
        ("com", "exclusive"), #comment
        ("var", "inclusive"),
        ("if", "exclusive"),
    )

    def t_begin_com(t):
        r"[ \t]*\#"
        t.lexer.begin("com")

    def t_com_other(t):
        r"[^\\\n]+"
        pass

    def t_com_lit(t):
        r"\\."
        pass

    def t_com_newline(t):
        r".*\\\n"
        t.lexer.lineno += 1
        pass

    def t_ifbegin(t):
        #ugly hack to ensure that this is at the begining of the line and keep the newline token.
        #PLY doesn't support the "^" beginning of line regexp :,(
        r"\nif"
        t.type = "END"
        t.lexer.push_state("if")
        return t

    def t_if_IF(t):
        #http://www.gnu.org/s/hello/manual/automake/Usage-of-Conditionals.html#Usage-of-Conditionals
        r"[ \t]+[^ \n\t]*"
        t.value = t.value.strip() #take the variable to test
        t.lexer.pop_state()
        return t

    def t_ELSE(t):
        r"\nelse"
        return t

    def t_ENDIF(t):
        r"\nendif"
        return t

    def t_CVAR(t): #configure variable
        r"@.*?@" #not greedy
        return t

    def t_MVAR(t): #makefile variable
        r"\$\(.*?\)"
        return t

    def t_com_END(t):
        r"\n"
        t.lexer.begin("INITIAL")
        t.lexer.lineno += 1
        return t

    # "=" / "+=" switch the lexer into the "var" state so the right-hand
    # side is tokenized with the var_* rules.
    def t_EQ(t):
        r"[ \t]*=[ \t]*"
        t.lexer.begin("var")
        t.value = t.value.strip()
        return t

    def t_PEQ(t):
        r"[ \t]*\+=[ \t]*"
        t.lexer.begin("var")
        t.value = t.value.strip()
        return t

    def t_contline(t):
        r"\\\n"
        t.lexer.lineno += 1
        pass

    def t_litteral(t):
        r"\\."
        t.value = t.value[1] #take the literal char
        t.type = "TEXT"
        return t

    def t_COL(t):
        r"[ \t]*:[ \t]*"
        t.lexer.begin("var")
        return t

    def t_var_ENDTAB(t):
        r"[ \t]*;[ \t]*"
        return t

    def t_ENDTAB(t):
        r"[ \t]*\n\t[ \t]*"
        t.lexer.lineno += 1
        return t

    def t_var_TEXT(t):
        r"[^ #\n\t,\$@\\]+"
        return t

    def t_TEXT(t):
        r"[^ \n\t:=\$@\\]+"
        return t

    # Newline ends the current assignment and resets the lexer state.
    def t_END(t):
        r"[ \t]*\n"
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t

    def t_var_SPACE(t):
        r"[ \t]+"
        return t

    def t_space(t):
        r"[ \t]"
        pass

    # "$x" (not followed by "(" or "{") is treated as literal text.
    def t_var_special(t):
        r"\$[^({]"
        t.type = "TEXT"
        return t

    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(amfile)
    #for tok in lexer:
    #    print(tok)

    #YACC stuff begins here

    def p_done(p):
        "done : vars end"
        p[0] = p[1]

    # Merge successive results: dicts are update()d, "+=" lists appended.
    def p_vars(p):
        """
        vars : vars end var
             | end var
        """
        if len(p) == 4:
            p[1][0].update(p[3][0])
            p[1][2].update(p[3][2])
            p[0] = [p[1][0], p[1][1] + p[3][1], p[1][2]]

        else:
            p[0] = p[2]

    def p_if(p):
        """
        var : IF vars ENDIF
            | IF vars ELSE vars ENDIF
        """
        if len(p) == 4:
            p[0] = [{},[],{p[1]:p[2]}]

        else:
            # The else branch is stored under "!<condition>".
            p[0] = [{},[],{p[1]:p[2],"!"+p[1]:p[4]}]

    def p_var(p):
        """
        var : textstr EQ textlst
            | textstr EQ
            | textstr PEQ textlst
        """
        if p[2] == "=":
            if len(p) == 4:
                p[0] = [{p[1]: p[3]},[],{}]
            else:
                p[0] = [{p[1]: []},[],{}]
        else:
            p[0] = [{},[[p[1], p[3]]],{}]

    def p_textlst(p):
        """
        textlst : textlst spacestr textstr
                | textstr
        """
        if len(p) == 4:
            p[0] = p[1] + [p[3]]
        else:
            p[0] = [p[1]]

    def p_teststr(p):
        """
        textstr : textstr TEXT
                | textstr CVAR
                | textstr MVAR
                | TEXT
                | CVAR
                | MVAR
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_space(p):
        """
        spacestr : spacestr SPACE
                 | SPACE
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_end(p):
        """
        end : end END
            | END
        """

    def p_error(p):
        # NOTE(review): crashes with AttributeError when p is None (EOF).
        print("syntax error at '%s'" % p.type,p.value)
        pass

    yacc.yacc()

    variables = yacc.parse(amfile)
    return variables
+
def initscan(amfile,iflst):
    """Recursively scan Makefile.am files for sources to build.

    amfile -- path to the top Makefile.am
    iflst -- conditional list produced by scanac()

    Returns (sources, useflag_sources, incflag_sources):
      sources -- list of source file paths,
      useflag_sources -- {source: [useflag, value]} for conditional files,
      incflag_sources -- {source: [include dirs]} for every file.
    """
    useflag_sources = {} #{source: [useflag, value]}
    incflag_sources = {} #{source: [include flags]}
    top_dir = os.path.split(amfile)[0] + "/"

    def scan(amfile):
        # Parse one Makefile.am and collect its sources (recurses into
        # SUBDIRS via the outer scan()).
        curdir = os.path.split(amfile)[0] + "/"
        amlist = scanamfile(openfile(amfile))
        #print(amfile)

        def sources_to_scan(amlist,curdir):
            incflags = []
            sources = []
            extra_sources = []
            #perhaps use set() here to eliminate the possibilty of duplicates?
            for variable in amlist[0]:
                if variable.split("_")[-1] == "SOURCES":
                    if variable.split("_")[0] == "EXTRA":
                        extra_sources += amlist[0][variable]
                    else:
                        sources += amlist[0][variable]

                if variable.split("_")[-1] == "LDADD":
                    # @CONFIGVAR@ entries in LDADD pull in EXTRA sources
                    # that are guarded by a configure conditional.
                    for item in amlist[0][variable]:
                        if item[0] == "@" and item[-1] == "@":
                            for ifstate in iflst:
                                if item.strip("@") in ifstate[1]:
                                    for file in ifstate[1][item.strip("@")]:
                                        for src in extra_sources:
                                            if file.split(".")[0] == src.split(".")[0]:
                                                useflag_sources[curdir + src] = ifstate[0]
                                                incflag_sources[curdir + src] = incflags

                        for src in extra_sources:
                            if item.split(".")[0] == src.split(".")[0]:
                                sources += [src]

                if variable.split("_")[-1] == "CFLAGS" or variable == "DEFAULT_INCLUDES":
                    # Turn -I flags into usable include directories.
                    for item in amlist[0][variable]:
                        if item[:2] == "-I":
                            if item[2:] == "$(top_srcdir)" or item[2:] == "$(srcdir)":
                                incflags += [top_dir]
                            elif item[2] == "/":
                                incflags += [item[2:]]
                            else:
                                incflags += [curdir + item[2:]]

            # Without explicit DEFAULT_INCLUDES, automake searches the
            # current and top directories.
            if not "DEFAULT_INCLUDES" in amlist[0]:
                incflags += [curdir,top_dir]

            if "SUBDIRS" in amlist[0]:
                for dir in amlist[0]["SUBDIRS"]:
                    sources += scan(curdir + dir + "/Makefile.am")

            # SUBDIRS may also appear as a "+=" addition.
            for lst in amlist[1]:
                if lst[0] == "SUBDIRS":
                    for dir in lst[1]:
                        sources += scan(curdir + dir + "/Makefile.am")

            # Conditional blocks: attach the matching useflag to each
            # source found inside the block.
            for ifstatement in amlist[2]:
                #print(ifstatement)
                for item in iflst:
                    if ifstatement.lstrip("!") in item[1]:
                        if ifstatement[0] == "!":
                            if item[1][ifstatement.lstrip("!")] == "false":
                                for src in sources_to_scan(amlist[2][ifstatement],curdir):
                                    useflag_sources[src] = item[0]

                        elif item[1][ifstatement] == "true":
                            for src in sources_to_scan(amlist[2][ifstatement],curdir):
                                useflag_sources[src] = item[0]

            #add filepath
            dirsources = []
            for source in sources:
                if os.path.split(source)[0] == "":
                    dirsources += [curdir + source]
                    incflag_sources[curdir + source] = incflags
                else:
                    dirsources += [source]

            return dirsources

        return sources_to_scan(amlist,curdir)
    return scan(amfile),useflag_sources,incflag_sources
+
def openfile(ofile):
    """Read *ofile* and return its full contents as one string.

    The file is decoded as UTF-8; undecodable bytes are replaced.
    """
    handle = open(ofile, encoding="utf-8", errors="replace")
    try:
        return handle.read()
    finally:
        handle.close()
diff --git a/ebuildgen/filetypes/ctypefiles.py b/ebuildgen/filetypes/ctypefiles.py
new file mode 100644
index 0000000..50b20ed
--- /dev/null
+++ b/ebuildgen/filetypes/ctypefiles.py
@@ -0,0 +1,229 @@
+import glob
+from ply import lex
+from ply import yacc
+
+#lex stuff begins here
+
def scanincludes(string,inclst,curdir,incpaths):
    """Scan ctype files for #includes

    Adds and returns new includes to the supplied include list
    input:
    string -- the file contents to scan
    inclst -- include list to extend: [global-set, local-set, ifdef-dict]
    curdir -- the current working dir
    incpaths -- include search paths used to classify local includes
    """
    tokens = (
        "GINCLUDE",
        "LINCLUDE",
        #"BUNDLEINC",
        "IFDEF",
        "ENDIF",
    )

    states = (
        ("com","exclusive"), #comment
        ("ifdef","inclusive"),
    )

    t_ANY_ignore = " \t"

    # /* ... */ block comments are skipped via the "com" state.
    def t_begin_com(t):
        r"/\*"
        t.lexer.push_state("com")

    def t_com_end(t):
        r"\*/"
        t.lexer.pop_state()
        pass

    def t_line_com(t):
        r"//.*"
        pass

    # "#if 0" blocks are dead code; treat them like comments.
    def t_ANY_begin_if0(t):
        r"\#if[ \t]+0"
        t.lexer.push_state("com")

    def t_com_endif(t):
        r"\#endif"
        t.lexer.pop_state()
        pass

    # Nested #ifdef inside a skipped region keeps the state stack balanced.
    def t_com_ifdef(t):
        r"\#ifdef"
        t.lexer.push_state("com")

    def t_IFDEF(t):
        r"\#ifdef[ \t]+[a-zA-Z_][a-zA-Z0-9_]*"
        t.value = t.value[6:].strip() #return the ifdef name
        t.lexer.push_state("ifdef")
        return t

    def t_ifdef_ENDIF(t):
        r"\#endif"
        t.lexer.pop_state()
        return t

    # #include <name.h> -- token value is the bare header name.
    def t_GINCLUDE(t):
        r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*\.h>"
        t.value = t.value[t.value.find("<"):].strip().strip("<>")
        return t

    # #include "name.h" -- token value is the bare header name.
    def t_LINCLUDE(t):
        r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+\".*\.h\""
        t.value = t.value[t.value.find('"'):].strip().strip('""')
        return t

    # Non-.h <...> includes (C++-style bundles) are ignored.
    def t_BUNDLEINC(t):
        r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*>"
        pass

    def t_ANY_error(t):
        #print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(string)
    #
    #for tok in lexer:
    #    print(tok)
    #
    #YACC stuff here

    # Result shape everywhere: [global-set, local-set, ifdef-dict].
    def p_includes2(p):
        """
        includes : includes ginc
        """
        if islocalinc(p[2],curdir,incpaths):
            p[1][1].add(p[2])
        else:
            p[1][0].add(p[2])
        p[0] = p[1]

    def p_lincludes(p):
        """
        includes : includes linc
        """
        # Quoted includes also search the file's own directory.
        locincpaths = incpaths + [curdir + "/"]
        if islocalinc(p[2],curdir,locincpaths):
            p[1][1].add(p[2])
        else:
            p[1][0].add(p[2])
        p[0] = p[1]

    def p_ifdef(p):
        """
        includes : includes IFDEF includes ENDIF
                 | IFDEF includes ENDIF
        """
        if len(p) == 5:
            p[1][2] = addnewifdefs(p[1][2],{p[2] : p[3]})
            p[0] = p[1]
        else:
            ifdef = {}
            ifdef[p[1]] = p[2]
            p[0] = [set(),set(),ifdef]

    def p_ifdefempty(p):
        """
        includes : includes IFDEF ENDIF
                 | IFDEF ENDIF
        """
        if len(p) == 4:
            p[0] = p[1]
        else:
            p[0] = [set(),set(),{}]

    def p_ginc(p):
        "includes : ginc"
        globinc = set()
        globinc.add(p[1])
        if islocalinc(p[1], curdir, incpaths):
            p[0] = [set(),globinc,{}]
        else:
            p[0] = [globinc,set(),{}]

    def p_linc(p):
        "includes : linc"
        locinc = set()
        locinc.add(p[1])
        locincpaths = incpaths + [curdir + "/"]
        if islocalinc(p[1], curdir, locincpaths):
            p[0] = [set(),locinc,{}]
        else:
            p[0] = [locinc,set(),{}]

    def p_ginclude(p):
        "ginc : GINCLUDE"
        p[0] = p[1]

    def p_linclude(p):
        "linc : LINCLUDE"
        p[0] = p[1]

    def p_error(p):
        # NOTE(review): crashes with AttributeError when p is None (EOF).
        print("syntax error at '%s'" % p.type)
        pass

    yacc.yacc()

    newinclst = yacc.parse(string)
    if newinclst == None:
        #Check if the file didn't have any includes
        return(inclst)
    newinclst = addnewincludes(newinclst,inclst)
    return(newinclst)
+
def islocalinc(inc, curdir, incpaths):
    """Checks if this is a local include

    Checks if the file can be found with the path that is supplied.
    If not this is probably a global include and thus return False.

    inc -- header file name
    curdir -- current directory (currently unused; kept for callers)
    incpaths -- include search paths to probe
    """

    for incpath in incpaths:
        # Only paths inside the /tmp work dir can hold local includes.
        # startswith()/truthiness replace the old "[:4] ==" slice test
        # and the non-idiomatic "not glob.glob(...) == []".
        if incpath.startswith("/tmp") and glob.glob(incpath + inc):
            return True

    return False
+
def addnewincludes(inclist1,inclist2):
    """Merge *inclist2* into *inclist1* and return the result.

    Index 0 (global includes) and index 1 (local includes) are
    set-unioned; index 2 (the ifdef dict) is merged recursively through
    addnewifdefs().
    """
    #come up with better names!!
    for idx in (0, 1):
        inclist1[idx] = set.union(inclist1[idx], inclist2[idx])
    inclist1[2] = addnewifdefs(inclist1[2], inclist2[2])
    return inclist1
+
def addnewifdefs(dict1,dict2):
    """Merge the ifdef sections of two include lists.

    Entries unique to *dict2* are copied into *dict1*; entries present
    in both have their global/local sets unioned and their nested ifdef
    dicts merged recursively.  Duplicated keys are popped from *dict2*.
    Returns the merged dict.
    """

    if not dict1 and not dict2:
        # nothing on either side
        return {}

    shared = dict1.keys() & dict2.keys()
    if not shared:
        # no overlap: plain copy of dict2's entries
        dict1.update(dict2)
        return dict1

    for key in shared:
        dict1[key][0] = dict1[key][0] | dict2[key][0]
        dict1[key][1] = dict1[key][1] | dict2[key][1]
        dict1[key][2] = addnewifdefs(dict1[key][2], dict2[key][2])
        dict2.pop(key)
    dict1.update(dict2)
    return dict1
diff --git a/ebuildgen/filetypes/makefilecom.py b/ebuildgen/filetypes/makefilecom.py
new file mode 100644
index 0000000..e76a15c
--- /dev/null
+++ b/ebuildgen/filetypes/makefilecom.py
@@ -0,0 +1,396 @@
+from ply import lex
+from ply import yacc
+import glob
+import os
+from subprocess import getstatusoutput
+
def expand(lst,variables):
    """Expands makefile variables.

    Every sub-list inside *lst* holds an unexpanded "$(...)" expression;
    it is interpreted with com_interp() (using the *variables* dict from
    the makefile) and the resulting words are expanded recursively.
    Plain strings are kept as-is.  Returns the fully expanded flat list.
    """

    expanded = []
    for entry in lst:
        if not isinstance(entry, list):
            expanded.append(entry)
            continue
        interpreted = com_interp(entry[0], variables)
        expanded.extend(expand(interpreted, variables))

    return expanded
+
def com_interp(string,variables):
    """Interpret the supplied command and return a list with the output

    string -- a make "$(...)"/"${...}" expression (including the "$")
    variables -- dict of makefile variables used for expansion

    Handles plain variable references, substitution references
    ("$(VAR:a=b)" with optional % patterns) and GNU make function calls
    dispatched through funcdict.
    """

    tokens = (
        "COMMAND",
        "COMMA",
        "COL",
        "EQ",
        "TEXT",
        "PERCENT",
        "BEGINCOM",
        "ENDCOM",
        "SPACE",
    )
    states = (
        ("ccode", "exclusive"), #command code
        ("eval", "exclusive"), #code to evaluate
    )

    # Match the first $(. Enter ccode state.
    def t_eval_ccode(t):
        r'\$(\{|\()'
        t.lexer.code_start = t.lexer.lexpos # Record the starting position
        t.lexer.level = 1 # Initial level
        t.lexer.push_state('ccode') # Enter 'ccode' state

    # Rules for the ccode state
    def t_ccode_newcom(t):
        r'\$(\{|\()'
        t.lexer.level +=1

    def t_ccode_endcom(t):
        r'(\}|\))'
        t.lexer.level -=1

        # If closing command, return the code fragment
        if t.lexer.level == 0:
            t.value = t.lexer.lexdata[t.lexer.code_start-1:t.lexer.lexpos]
            t.type = "COMMAND"
            t.lexer.pop_state()
            return t

    def t_ccode_text(t):
        r"[^\$\(\{\)\}]"

    def t_BEGINCOM(t):
        r"(\(|\{)"
        t.lexer.begin("eval")
        return t

    def t_eval_ENDCOM(t):
        r"(\)|\})"
        t.lexer.begin("INITIAL")
        return t

    def t_eval_PERCENT(t):
        r"\%"
        return t

    def t_eval_EQ(t):
        r"="
        return t

    def t_eval_COMMA(t):
        r",[ \t]*"
        return t

    def t_eval_COL(t):
        r":"
        return t

    def t_eval_TEXT(t):
        r"[^ \n\t:=\)\}\(\}\\\$,]+"
        return t

    def t_TEXT(t):
        r"[^ \t$\(\{]"
        return t

    def t_ANY_SPACE(t):
        r"[ \t]"
        return t

    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(string)
    #for tok in lexer:
    #    print(tok)


    #YACC stuff begins here

    def p_comp(p):
        """
        complst : BEGINCOM newstr ENDCOM
                | func
        """
        if len(p) == 4:
            p[0] = p[2]
        else:
            p[0] = p[1]

    def p_complst(p):
        "complst : compstr"
        p[0] = p[1].split()

    # Plain "$(VAR)" references expand to the variable's words, joined
    # with spaces; unknown variables expand to "".
    def p_compstr(p):
        """
        compstr : compstr BEGINCOM textstr ENDCOM
                | BEGINCOM textstr ENDCOM
                | compstr textstr
                | compstr spacestr
                | textstr
                | spacestr
        """
        p[0] = ""
        if len(p) == 4:
            if p[2] in variables:
                for item in expand(variables[p[2]],variables):
                    p[0] += item + " "
                p[0] = p[0][:-1]
            else:
                p[0] = ""
        elif len(p) == 5:
            if p[3] in variables:
                for item in expand(variables[p[3]],variables):
                    p[1] += item + " "
                p[0] = p[1][:-1]
            else:
                p[0] = ""
        elif len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    # Substitution reference "$(VAR:x=y)": rewrite each word selected by
    # getstr according to the replacement pattern.
    def p_tonewstr(p):
        """
        newstr : getstr EQ textstr PERCENT textstr
               | getstr EQ PERCENT textstr
               | getstr EQ textstr PERCENT
               | getstr EQ PERCENT
               | getstr EQ textstr
        """
        newtextlist = []
        if p[1] == []:
            p[0] = p[1]
        elif len(p) == 6:
            for text in p[1]:
                newtextlist.append(p[3] + text + p[5])
            p[0] = newtextlist

        elif len(p) == 5:
            if p[3] == "%":
                for text in p[1]:
                    newtextlist.append(text + p[4])
                p[0] = newtextlist
            else:
                for text in p[1]:
                    newtextlist.append(p[3] + text)
                p[0] = newtextlist

        elif p[3] == "%":
            p[0] = p[1]
        else:
            for text in p[1]:
                newtextlist.append(text + p[3])
            p[0] = newtextlist


    # "VAR:pattern" part: select the variable's words matching the
    # pattern and strip the matched prefix/suffix.
    def p_getstr(p):
        """
        getstr : textstr COL textstr PERCENT textstr
               | textstr COL PERCENT textstr
               | textstr COL textstr PERCENT
               | textstr COL PERCENT
               | textstr COL textstr
        """
        if not p[1] in variables:
            p[0] = []
        else:
            textlst = expand(variables[p[1]],variables) #make sure it's expanded
            newtextlst = []

            if len(p) == 6:
                l1 = len(p[3]) #length of str1
                l2 = len(p[5])
                for text in textlst:
                    if p[3] == text[0:l1] and p[5] == text[-l2:]:
                        newtextlst.append(text[l1:-l2])

                p[0] = newtextlst

            elif len(p) == 5:
                if p[3] == "%":
                    l1 = len(p[4])
                    for text in textlst:
                        if p[4] == text[-l1:]:
                            newtextlst.append(text[:-l1])

                    p[0] = newtextlst
                else:
                    l1 = len(p[3])
                    for text in textlst:
                        if p[3] == text[0:l1]:
                            newtextlst.append(text[l1:])

                    p[0] = newtextlst
            elif p[3] == "%":
                p[0] = textlst
            else:
                l1 = len(p[3])
                for text in textlst:
                    if p[3] == text[-l1:]:
                        newtextlst.append(text[:-l1])

                p[0] = newtextlst

    # GNU make function call: dispatch through the funcdict table.
    def p_func(p):
        """
        func : BEGINCOM textstr SPACE funcinput
        """
        #result = ["This calls a function"]
        result = funcdict[p[2]](p[4],variables)
        p[0] = result

    # Function arguments; args containing "(" are kept wrapped in a list
    # so they get interpreted later.
    def p_funcinput(p):
        """
        funcinput : funcinput inputstr COMMA
                  | funcinput inputstr ENDCOM
                  | inputstr COMMA
                  | inputstr ENDCOM
        """
        if len(p) == 4:
            if "(" in p[2]: #command in the str
                p[1].append([p[2]])
            else:
                p[1].append(p[2])
            p[0] = p[1]
        else:
            if "(" in p[1]:
                p[0] = [[p[1]]]
            else:
                p[0] = [p[1]]

    def p_inputstr(p):
        """
        inputstr : inputstr spacestr
                 | inputstr TEXT
                 | inputstr COMMAND
                 | spacestr
                 | TEXT
                 | COMMAND
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    # Nested "$(...)" fragments are interpreted recursively and their
    # words re-joined with spaces.
    def p_command(p):
        """
        textstr : textstr COMMAND
                | COMMAND
        """
        if len(p) == 3:
            for item in com_interp(p[2],variables):
                p[1] += item + " "
            p[0] = p[1][:-1]
        else:
            p[0] = ""
            for item in com_interp(p[1],variables):
                p[0] += item + " "
            p[0] = p[0][:-1] #remove the last space

    def p_textstr(p):
        """
        textstr : textstr TEXT
                | TEXT
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_spacestr(p):
        """
        spacestr : spacestr SPACE
                 | SPACE
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_error(p):
        # NOTE(review): crashes with AttributeError when p is None (EOF).
        print("syntax error at '%s'" % p.type,p.lexpos)
        pass

    yacc.yacc()

    retlst = yacc.parse(string)

    #print(retlst)

    return retlst
+
def foreach(inputlst,variables):
    """GNU makefile foreach.

    inputlst holds [var, list, text].  Each word of the expanded list is
    bound to the loop variable in *variables*, and *text* is expanded
    under that binding.  Returns the concatenated expansions.
    NOTE: the loop variable stays set in *variables* afterwards.
    """

    loopvar = expand(inputlst[0:1], variables)
    words = expand(inputlst[1:2], variables)
    collected = []
    for word in words:
        variables[loopvar[0]] = [word]
        collected += expand([inputlst[2]], variables)

    return collected
+
def wildcard(inputlst,variables):
    """GNU makefile wildcard.

    Expands the argument and globs the resulting (first) pattern.
    """
    pattern = expand(inputlst, variables)[0]
    return glob.glob(pattern)
+
def shell(inputlst,variables):
    """GNU makefile shell command.

    Expands the argument into a shell command line, runs it, and returns
    the whitespace-split output as a list.  On a nonzero exit status the
    failure is reported but the (possibly partial) output is still
    returned, matching make's lenient behavior.
    """
    # str.join replaces the old manual "item + ' '" accumulation.
    command = " ".join(expand(inputlst, variables))
    (status, returnstr) = getstatusoutput(command)
    if status:
        # Old message ran the text into the command; add a separator.
        print("Error with command: " + command)
    # split() replaces the old append-one-by-one loop.
    return returnstr.split()
+
def notdir(inputlst,variables): #strip the dir from the file name
    """GNU makefile notdir.

    The single argument is either a nested list holding an unexpanded
    command (then it is expanded first) or a plain whitespace-joined
    string of names.  Returns the names with directory parts removed.
    """
    if isinstance(inputlst[0], list):
        names = expand(inputlst, variables)
    else:
        names = inputlst[0].split()

    return [os.path.split(name)[1] for name in names]
+
# Dispatch table mapping GNU make function names to their Python
# implementations; looked up by com_interp's p_func rule.
funcdict = {
    "foreach" : foreach,
    "wildcard" : wildcard,
    "shell" : shell,
    "notdir" : notdir,
    }
+
+#print(com_interp("(shell pkg-config --cflags libsoup-2.4 $(x))",{"x":["gtk+-2.0"], "y":[".py"], "z":["u"]}))
+
diff --git a/ebuildgen/filetypes/makefiles.py b/ebuildgen/filetypes/makefiles.py
new file mode 100644
index 0000000..881a860
--- /dev/null
+++ b/ebuildgen/filetypes/makefiles.py
@@ -0,0 +1,479 @@
+from ply import lex
+from ply import yacc
+import glob
+from ebuildgen.filetypes.makefilecom import expand
+
def scanmakefile(makefile):
    """Scan supplied makefile.

    makefile -- text content of the makefile

    Returns a tuple (targets, variables) where targets is a list of
    [target, deps, options] entries and variables maps a variable name
    to its value token list.
    """
    makefile = "\n" + makefile #Add \n so you can guess vars
    tokens = (
        "END",
        "COL",
        "SEMICOL",
        "EQ",
        "PEQ",
        "CEQ",
        "QEQ",
        "TEXT",
        "COMMAND",
        "ENDTAB",
        "SPACE",
        # COMMA must be declared here: a t_COMMA rule exists below and
        # PLY refuses to build a lexer with a rule for an unknown token.
        "COMMA",
    )

    states = (
        ("com", "exclusive"),
        ("ccode", "exclusive"), #command code
        ("var", "inclusive"),
    )

    # Match the first $(. Enter ccode state.
    def t_ccode(t):
        r'\$(\{|\()'
        t.lexer.code_start = t.lexer.lexpos # Record the starting position
        t.lexer.level = 1 # Initial level
        t.lexer.push_state('ccode') # Enter 'ccode' state

    # Rules for the ccode state
    def t_ccode_newcom(t):
        r'\$(\{|\()'
        t.lexer.level += 1

    def t_ccode_endcom(t):
        r'(\}|\))'
        t.lexer.level -= 1

        # If closing command, return the code fragment
        if t.lexer.level == 0:
            t.value = t.lexer.lexdata[t.lexer.code_start-1:t.lexer.lexpos]
            t.type = "COMMAND"
            t.lexer.pop_state()
            return t

    def t_ccode_text(t):
        r"[^\$\(\{\)\}]"

    def t_begin_com(t):
        r"[ \t]*\#"
        t.lexer.begin("com")

    def t_com_other(t):
        r"[^\\\n]+"
        pass

    def t_com_lit(t):
        r"\\."
        pass

    def t_com_newline(t):
        r".*\\\n"
        t.lexer.lineno += 1
        pass

    def t_com_END(t):
        r"\n"
        t.lexer.begin("INITIAL")
        t.lexer.lineno += 1
        return t

    def t_bsdexe(t): #Create a cleaner version
        r".*\!=.*"
        pass

    def t_EQ(t):
        r"[ \t]*=[ \t]*"
        t.lexer.begin("var")
        return t

    def t_PEQ(t):
        r"[ \t]*\+=[ \t]*"
        t.lexer.begin("var")
        return t

    def t_CEQ(t):
        r"[ \t]*:=[ \t]*"
        t.lexer.begin("var")
        return t

    def t_QEQ(t):
        r"[ \t]*\?=[ \t]*"
        t.lexer.begin("var")
        return t

    def t_contline(t):
        r"\\\n"
        t.lexer.lineno += 1
        pass

    def t_litteral(t):
        r"\\."
        t.value = t.value[1] #take the literal char
        t.type = "TEXT"
        return t

    def t_COL(t):
        r"[ \t]*:[ \t]*"
        t.lexer.begin("var")
        return t

    def t_var_ENDTAB(t):
        r"[ \t]*;[ \t]*"
        return t

    def t_SEMICOL(t):
        r";"
        return t

    def t_COMMA(t):
        r","
        return t

    def t_ENDTAB(t):
        r"[ \t]*\n\t[ \t]*"
        t.lexer.lineno += 1
        return t

    def t_var_TEXT(t):
        r"[^ #\n\t,\$\\]+"
        return t

    def t_TEXT(t):
        r"[^ \n\t:\?\+=\\,\$]+"
        return t

    def t_END(t):
        r"[ \t]*\n+"
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t

    def t_SPACE(t):
        r"[ \t]"
        return t

    def t_var_special(t):
        r"\$[^({]"
        t.type = "TEXT"
        return t

    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()

    #YACC begins here

    #a dict with values of defined variables
    variables = {}
    ivars = [] #keep track of the immediate variables
    targets = [] #buildtargets, [[target,deps,options],[target2,....


    def p_testvar(p):
        """
        comp : comp var
             | comp rule
             | comp end
             | var
             | rule
        """

    def p_ruleoption(p):
        """
        rule : end textlst COL textlst options
             | end textlst COL options
        """
        if len(p) == 6:
            rulelst = convtargets(p[2], p[4], targets, variables)
            for rule in rulelst:
                rule = findfiles(rule, variables) #Implicit rule (path search)
                rule.append(p[5])
                targets.append(rule)
        else:
            rulelst = convtargets(p[2], [], targets, variables)
            for rule in rulelst:
                rule = findfiles(rule, variables) #Implicit rule (path search)
                rule.append(p[4])
                targets.append(rule)

    def p_rule(p):
        """
        rule : end textlst COL textlst
             | end textlst COL
        """
        if len(p) == 5:
            rulelst = convtargets(p[2], p[4], targets, variables)
            for rule in rulelst:
                rule, newtars = imprules(rule, targets, variables)
                targets.append(rule)
                for tar in newtars:
                    targets.append(tar)
        else:
            rulelst = convtargets(p[2], [], targets, variables)
            for rule in rulelst:
                rule, newtars = imprules(rule, targets, variables)
                targets.append(rule)
                for tar in newtars:
                    targets.append(tar)

    def p_peq(p): #immediate if peq was defined as immediate before else deferred
        """
        var : end textstr PEQ textlst
            | end textstr PEQ
        """
        if len(p) == 5:
            if not p[2] in variables:
                variables[p[2]] = p[4]
            elif not p[2] in ivars:
                variables[p[2]] += p[4]
            else:
                textvalue = expand(p[4], variables) #expand any variables
                variables[p[2]] = textvalue

    def p_ceq(p): #immediate
        """
        var : end textstr CEQ textlst
            | end textstr CEQ
        """
        if len(p) == 5:
            textvalue = expand(p[4], variables) #expand any variables
            variables[p[2]] = textvalue
            ivars.append(p[2])
        else:
            variables[p[2]] = []
            ivars.append(p[2])

    def p_qeq(p): #deferred
        """
        var : end textstr QEQ textlst
            | end textstr QEQ
        """
        if not p[2] in variables and len(p) == 5:
            variables[p[2]] = p[4]
        else:
            variables[p[2]] = []

    def p_var(p): #deferred
        """
        var : end textstr EQ textlst
            | end textstr EQ
        """
        if len(p) == 5:
            variables[p[2]] = p[4]
        else:
            variables[p[2]] = []

    def p_options(p):
        """
        options : options ENDTAB textlst
                | ENDTAB textlst
        """
        if len(p) == 4:
            p[0] = p[1] + p[3]
        else:
            p[0] = p[2]

    def p_textlst(p):
        """
        textlst : textlst spacestr command
                | textlst spacestr textstr
                | command
                | textstr
        """
        if len(p) == 4:
            p[0] = p[1] + [p[3]]
        else:
            p[0] = [p[1]]

    def p_com_and_str(p):
        """
        command : command textstr
                | textstr command
        """
        # commands are carried as one-element lists; merge the text onto
        # whichever side holds the list
        if isinstance(p[1], list):
            p[0] = [p[1][0] + p[2]]
        else:
            p[0] = [p[1] + p[2][0]]

    def p_textstr(p):
        """
        textstr : textstr TEXT
                | TEXT
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_command(p):
        """
        command : command COMMAND
                | COMMAND
        """
        if len(p) == 2:
            p[0] = [p[1]] #commands are lists within the textlst
        else:
            p[0] = [p[1][0] + p[2]]

    def p_space(p):
        """
        spacestr : spacestr SPACE
                 | SPACE
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]

    def p_end(p):
        """
        end : end END
            | end spacestr END
            | END
        """


    def p_error(p):
        # PLY passes None when the error is at end of input; guard it
        # before touching token attributes.
        if p is None:
            print("syntax error at end of input")
        else:
            print("syntax error at '%s'" % p.type, p.value)

    yacc.yacc()

    yacc.parse(makefile)

    return targets, variables
+
+
def convtargets(tarlist, deplist, targets, variables):
    """Convert makefile targets that are not explicitly stated in the makefile

    tarlist -- target-side tokens of the rule
    deplist -- dependency-side tokens of the rule
    targets -- build targets found so far ([[target, deps, options], ...])
    variables -- dict of currently known makefile variables

    Expands pattern rules ("%" targets) against the dependencies of the
    already-known targets and returns a list of [target, deps] pairs.
    """
    finaltars = []
    deps = expand(deplist, variables)
    tars = expand(tarlist, variables) #ugh high risk of confusion because of the names...
    for target in tars:
        if "%" in target:
            tarsplit = target.split("%")
            prefix, suffix = tarsplit[0], tarsplit[1]
            for buildtarget in targets:
                for newtar in buildtarget[1]:
                    # startswith/endswith instead of slicing: the old
                    # newtar[-l2:] comparison broke when the pattern
                    # ended in "%" (l2 == 0 slices the whole string,
                    # never equal to the empty suffix).  The length
                    # check stops prefix and suffix from overlapping.
                    if (len(newtar) >= len(prefix) + len(suffix)
                            and newtar.startswith(prefix)
                            and newtar.endswith(suffix)):
                        stem = newtar[len(prefix):len(newtar) - len(suffix)]
                        rulelst = [newtar, []]
                        for newdep in deps:
                            if "%" in newdep:
                                depsplit = newdep.split("%")
                                rulelst[1] += [depsplit[0] + stem + depsplit[1]]
                            else:
                                rulelst[1] += [newdep]
                        finaltars.append(rulelst)
        else:
            finaltars.append([target, deps])
    return finaltars
+
def findfiles(rule, variables): #check if deps exists, if not look for them in VPATH.
    """Find files for a implicit makefile rule

    Searches the VPATH directories for files matching the implicitly
    stated target and its dependencies, e.g. io.o -> src/io.c when
    VPATH contains src.  Without a VPATH the rule is returned as-is.
    """
    if "VPATH" not in variables:
        # nowhere to search without a VPATH
        return rule

    def locate(name):
        # Return name itself when it already matches a file, otherwise
        # the first VPATH hit, falling back to the unmodified name.
        if glob.glob(name):
            return name
        hits = []
        for path in variables["VPATH"]:
            hits += glob.glob(path + "/" + name)
        return hits[0] if hits else name

    newrule = [locate(rule[0])]
    newrule.append([locate(dep) for dep in rule[1]])
    return newrule
+
def find(searchstr, paths):
    """Returns a list of matches for a search pattern

    Mirrors makefile implicit-rule lookup: only the first match counts,
    so at most one result is ever returned.
    """
    hits = [hit for path in paths for hit in glob.glob(path + "/" + searchstr)]
    # truncate to the first hit (empty list stays empty)
    return hits[:1]
+
def imprules(rule, targets, variables): #Implicit Rules
    """Converts implicit rules to explicit rules

    rule -- a [target, deps] pair as produced by convtargets
    targets -- build targets found so far (unused here beyond the signature)
    variables -- dict of known makefile variables (VPATH is consulted)

    When the target has no extension and all deps are .o files, looks
    for matching .c sources (cwd plus VPATH) and synthesises compile
    rules for them.  Returns (rule, newtars) where newtars is the list
    of generated [target, deps, options] compile rules.
    """
    if len(rule[0].split(".")) == 1: #this is not a *.* file
        deps_type = set() #.o for example
        for dep in rule[1]:
            if len(dep.split(".")) == 2:
                deps_type.add(dep.split(".")[1])
            else:
                deps_type.add("notype")
        # only the "link objects into a binary" shape is handled:
        # every dependency must be a .o file
        if len(deps_type) == 1 and "o" in deps_type:
            searchpaths = ["./"]
            if "VPATH" in variables:
                searchpaths += variables["VPATH"]
            matches = []
            # look for a source named after the target itself
            # (assumes C sources only — TODO confirm other languages)
            matches = find(rule[0] + ".c", searchpaths)
            if matches:
                newtargets = []
                newdeps = []
                newtargets.append(rule[0] + ".o")
                newdeps.append(matches[0])
                matches = []
                # find a .c source for every .o dependency; only keep
                # them when every single one was found
                for dep in rule[1]:
                    matches += find(dep.split(".")[0] + ".c", searchpaths)
                if len(matches) == len(rule[1]):
                    newtargets += rule[1]
                    newdeps += matches
                newtars = []
                # one synthesised compile rule per discovered source,
                # using make's default compile recipe placeholders
                for index in range(len(newtargets)):
                    newtars.append([newtargets[index], [newdeps[index]], [["(CC)"], ["(CFLAGS)"], ["(CPPFLAGS)"], "-c"]])

                # default link recipe placeholders for the original rule
                rule.append([["(CC)"], ["(LDFLAGS)"], "n.o", ["(LOADLIBES)"], ["(LDLIBS)"]])
                return rule, newtars

    # fallback: no implicit expansion possible, just resolve paths
    rule = findfiles(rule, variables)
    rule.append([])
    return rule, []
+
+#file="Makefile2"
+
+#with open(file, encoding="utf-8", errors="replace") as inputfile:
+# scanmakefile(inputfile.read())