aboutsummaryrefslogtreecommitdiff
blob: e6c493e639854a28a184a405ec8faef69bfad6c3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import os
import glob
from ply import lex
from ply import yacc

def scandir(dir, filetypes):
    """Recursively collect files under *dir* whose names end with any
    suffix in *filetypes* (e.g. [".c", ".h"]).

    Returns a flat list of file paths (subdirectory matches included).
    """
    files = []
    # Recurse into every immediate subdirectory first.
    subdirs = [f for f in os.listdir(dir)
               if os.path.isdir(os.path.join(dir, f))]
    for sub in subdirs:
        # os.path.join instead of raw "/" concatenation keeps this
        # portable across platforms.
        files += scandir(os.path.join(dir, sub), filetypes)
    # Then pick up matching files in this directory itself.
    for filetype in filetypes:
        files += glob.glob(os.path.join(dir, "*" + filetype))
    return files

#lex stuff begins here

def scanincludes(string,inclst):
    """Scan C/C++ source text for #include directives using a PLY
    lexer + parser.

    string -- the source file contents to scan.
    inclst -- two-element list of sets: inclst[0] collects global
              (<...>) .h includes, inclst[1] collects local ("...")
              .h includes.  Mutated in place and also returned.

    NOTE: the nested functions below are PLY rules; their docstrings
    ARE the token regexes / grammar productions, and their definition
    order matters.  Do not add docstrings or reorder them.
    """
    tokens = (
            "GINCLUDE",
            "LINCLUDE",
            "BUNDLEINC",
            "IFDEF",
            "ENDIF",
            )

    # Lexer states: "com" swallows everything inside comments and
    # disabled (#if 0 / #ifdef-within-comment) regions; "ifdef" is
    # inclusive, so normal tokens still fire inside #ifdef blocks.
    states = (
            ("com","exclusive"), #comment
            ("ifdef","inclusive"),
            )

    t_ANY_ignore = " \t"

    # /* ... enter comment state (handles nesting via push/pop).
    def t_begin_com(t):
        r"/\*"
        t.lexer.push_state("com")

    # ... */ leave comment state.
    def t_com_end(t):
        r"\*/"
        t.lexer.pop_state()
        pass

    # // line comments are discarded outright.
    def t_line_com(t):
        r"//.*"
        pass

    # "#if 0" disables a region: reuse the comment state to skip it.
    def t_ANY_begin_if0(t):
        r"\#if[ \t]+0"
        t.lexer.push_state("com")

    # #endif inside a skipped region pops back out of it.
    def t_com_endif(t):
        r"\#endif"
        t.lexer.pop_state()
        pass

    # #ifdef nested inside a skipped region pushes another "com" level
    # so its matching #endif doesn't end the outer skip early.
    def t_com_ifdef(t):
        r"\#ifdef"
        t.lexer.push_state("com")

    # #ifdef NAME in live code: token value is the macro name.
    def t_IFDEF(t):
        r"\#ifdef[ \t]+[a-zA-Z_][a-zA-Z0-9_]*"
        t.value = t.value[6:].strip() #return the ifdef name
        t.lexer.push_state("ifdef")
        return t

    def t_ifdef_ENDIF(t):
        r"\#endif"
        t.lexer.pop_state()
        return t

    # #include <foo.h> -- value is the bare header name.
    def t_GINCLUDE(t):
        r"\#[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*\.h>"
        t.value = t.value[8:].strip().strip("<>")
        return t

    # #include "foo.h" -- value is the bare header name.
    def t_LINCLUDE(t):
        r"\#[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+\".*\.h\""
        t.value = t.value[8:].strip().strip('""')
        return t

    # #include <foo> with no .h (e.g. C++ standard headers) is
    # recognized but deliberately dropped.
    def t_BUNDLEINC(t):
        r"\#[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*>"
        pass

    # Anything else is skipped one character at a time.
    def t_ANY_error(t):
        #print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()

    #lexer.input(string)
    #
    #for tok in lexer:
    #    print(tok)
    #
    #YACC stuff here

    # Grammar: a file is a sequence of includes, possibly wrapped in
    # IFDEF ... ENDIF groups.  The productions only exist to drive the
    # ginc/linc reductions below; they build no parse tree.
    def p_includes2(p):
        """
        includes : includes ginc
                 | includes linc
                 | includes IFDEF includes ENDIF
                 | IFDEF includes ENDIF
        """

    def p_includes(p):
        """
        includes : ginc
                 | linc
        """

    # Each reduced include is recorded into the caller's sets.
    def p_ginclude(p):
        "ginc : GINCLUDE"
        inclst[0].add(p[1])

    def p_linclude(p):
        "linc : LINCLUDE"
        inclst[1].add(p[1])

    # Parse errors are silently ignored: best-effort scanning.
    def p_error(p):
        #print("syntax error at '%s'" % p.type)
        pass

    yacc.yacc()

    yacc.parse(string)
    return(inclst)


def startscan(dir,filetypes):
    """Walk *dir* for files matching *filetypes* and harvest their
    #include targets.

    Returns a two-element list: [set of global <...> headers,
    set of local "..." headers].
    """
    # inclst[0]: global includes, inclst[1]: local includes.
    inclst = [set(), set()]

    for path in scandir(dir, filetypes):
        print(path)
        # Replace undecodable bytes rather than aborting the scan.
        with open(path, encoding="utf-8", errors="replace") as src:
            inclst = scanincludes(src.read(), inclst)

    return inclst