You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

gen_boilerplate.py 6.4 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227
  1. #!/usr/bin/env python
  2. # Copyright 2021 The KubeEdge Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. # used to generate copyright from hack/boilerplate.
  16. # shoud be executed in root dir:
  17. # hack/boilerplate/gen_boilerplate.py
  18. #
  19. # modify from https://github.com/kubernetes/kubernetes/blob/master/hack/boilerplate/boilerplate.py
  20. # which is used for check copyright
  21. from __future__ import print_function
  22. import argparse
  23. import datetime
  24. import glob
  25. import os
  26. import re
  27. import sys
  28. parser = argparse.ArgumentParser()
  29. parser.add_argument(
  30. "filenames",
  31. help="list of files to generate copyright, all files if unspecified",
  32. nargs='*')
  33. rootdir = os.path.dirname(__file__) + "/../../"
  34. rootdir = os.path.abspath(rootdir)
  35. parser.add_argument(
  36. "--rootdir", default=rootdir, help="root directory to generate")
  37. default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
  38. parser.add_argument(
  39. "--boilerplate-dir", default=default_boilerplate_dir)
  40. parser.add_argument(
  41. "-v", "--verbose",
  42. help="give verbose output regarding why a file does not pass",
  43. action="store_true")
  44. args = parser.parse_args()
  45. verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
  46. def get_year():
  47. return str(datetime.datetime.now().year)
  48. def get_refs():
  49. refs = {}
  50. for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
  51. extension = os.path.basename(path).split(".")[1]
  52. ref_file = open(path, 'r')
  53. ref = ref_file.read()
  54. ref_file.close()
  55. refs[extension] = ref.replace('YEAR', get_year())
  56. return refs
  57. def is_generated_file(filename, data, regexs):
  58. for d in skipped_ungenerated_files:
  59. if d in filename:
  60. return False
  61. p = regexs["generated"]
  62. return p.search(data)
  63. def file_update(filename, refs, regexs):
  64. try:
  65. f = open(filename, 'r')
  66. except Exception as exc:
  67. print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
  68. return False
  69. data = f.read()
  70. f.close()
  71. if 'Copyright' in data[:100]:
  72. print("%s already has copyright." % filename, file=verbose_out)
  73. return True
  74. if not data.strip():
  75. # if nothing, no need to add
  76. return True
  77. # determine if the file is automatically generated
  78. generated = is_generated_file(filename, data, regexs)
  79. basename = os.path.basename(filename)
  80. extension = file_extension(filename)
  81. if generated:
  82. if extension == "go":
  83. extension = "generatego"
  84. elif extension == "bzl":
  85. extension = "generatebzl"
  86. if extension != "":
  87. ref = refs[extension]
  88. else:
  89. ref = refs[basename]
  90. prefix = ''
  91. # remove extra content from the top of files
  92. if extension == "go" or extension == "generatego":
  93. p = regexs["go_build_constraints"]
  94. #(data, found) = p.subn("", data, 1)
  95. m = p.match(data)
  96. if m:
  97. prefix = m.group()
  98. data = data[len(prefix):]
  99. elif extension in ["sh", "py"]:
  100. p = regexs["shebang"]
  101. m = p.match(data)
  102. if m:
  103. prefix = m.group()
  104. data = data[len(prefix):]
  105. try:
  106. with open(filename, 'w') as f:
  107. f.write(prefix + ref + data)
  108. except Exception as exc:
  109. print("Unable to write %s: %s" % (filename, exc), file=verbose_out)
  110. return False
  111. return True
  112. def file_extension(filename):
  113. return os.path.splitext(filename)[1].split(".")[-1].lower()
  114. skipped_dirs = ['third_party', '_gopath', '_output', '.git',
  115. "vendor",
  116. ]
  117. skipped_files = [
  118. 'docs/conf.py', 'docs/Makefile']
  119. skipped_ungenerated_files = [
  120. 'hack/boilerplate/boilerplate.py']
  121. def normalize_files(files):
  122. newfiles = []
  123. for pathname in files:
  124. if any(x in pathname for x in skipped_dirs):
  125. continue
  126. newfiles.append(pathname)
  127. for i, pathname in enumerate(newfiles):
  128. if not os.path.isabs(pathname):
  129. newfiles[i] = os.path.join(args.rootdir, pathname)
  130. return newfiles
  131. def get_files(extensions):
  132. files = []
  133. if len(args.filenames) > 0:
  134. files = args.filenames
  135. else:
  136. for root, dirs, walkfiles in os.walk(args.rootdir):
  137. # don't visit certain dirs. This is just a performance improvement
  138. # as we would prune these later in normalize_files(). But doing it
  139. # cuts down the amount of filesystem walking we do and cuts down
  140. # the size of the file list
  141. for d in skipped_dirs:
  142. if d in dirs:
  143. dirs.remove(d)
  144. for name in walkfiles:
  145. pathname = os.path.join(root, name)
  146. files.append(pathname)
  147. files = normalize_files(files)
  148. outfiles = []
  149. skipped_norm_files = normalize_files(skipped_files)
  150. for pathname in files:
  151. if pathname in skipped_norm_files:
  152. continue
  153. basename = os.path.basename(pathname)
  154. extension = file_extension(pathname)
  155. if extension in extensions or basename in extensions:
  156. outfiles.append(pathname)
  157. return outfiles
  158. def get_regexs():
  159. regexs = {}
  160. # strip // +build \n\n build constraints
  161. regexs["go_build_constraints"] = re.compile(
  162. r"^(// \+build.*\n)+\n", re.MULTILINE)
  163. # strip #!.* from scripts
  164. regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
  165. # Search for generated files
  166. regexs["generated"] = re.compile('DO NOT EDIT')
  167. return regexs
  168. def main():
  169. regexs = get_regexs()
  170. refs = get_refs()
  171. filenames = get_files(refs.keys())
  172. for filename in filenames:
  173. if not file_update(filename, refs, regexs):
  174. print(filename, file=sys.stdout)
  175. return 0
  176. if __name__ == "__main__":
  177. sys.exit(main())