
interlinks: Clean up style

Fix PEP 8 issues and switch to spaces for indentation, for consistency
with the rest of Pelican.

Also remove an invisible character that had somehow sneaked into the
head of the file and served no apparent purpose.
Chris Scutcher committed 6 years ago
commit 00475a0123
1 changed file with 49 additions and 45 deletions:
    interlinks/interlinks.py

interlinks/interlinks.py  +49 -45

@@ -1,62 +1,66 @@
-# -*- coding: utf-8 -*-
-
+# -*- coding: utf-8 -*-
 """
 Interlinks
 =========================
-
-This plugin allows you to include "interwiki" or shortcuts links into the blog, as keyword>rest_of_url
-
+This plugin allows you to include "interwiki" or shortcuts links into the blog,
+as keyword>rest_of_url
 """
+import re
+
+from pelican import signals
 
 from bs4 import BeautifulSoup
 from bs4 import SoupStrainer
-from pelican import signals
-import re
 
 interlinks = {}
 
-def getSettings (generator):
 
-	global interlinks
+def getSettings(generator):
+
+    global interlinks
+
+    interlinks = {'this': generator.settings['SITEURL']+"/"}
+    if 'INTERLINKS' in generator.settings:
+        for key, value in generator.settings['INTERLINKS'].items():
+            interlinks[key] = value
 
-	interlinks = {'this': generator.settings['SITEURL']+"/"}
-	if 'INTERLINKS' in generator.settings:
-		for key, value in generator.settings['INTERLINKS'].items():
-			interlinks[key] = value
 
-			
 def parse_links(instance):
 
-	if instance._content is not None:
-		content = instance._content
-		
-		if '<a' in content:
-			text = BeautifulSoup(content, "html.parser", parse_only=SoupStrainer("a"))
-			for link in text.find_all("a",href=re.compile("(.+?)>")):
-				old_tag = str(link)
-				url = link.get('href')
-				m = re.search(r"(.+?)>", url).groups()
-				name = m[0]
-				if name in interlinks:
-					hi = url.replace(name + ">", interlinks[name])
-					link['href'] = hi
-				
-				content = content.replace(old_tag, str(link))
-
-		if '<img' in content:
-			text = BeautifulSoup(content, "html.parser", parse_only=SoupStrainer("img"))
-			for img in text.find_all('img', src=re.compile("(.+?)>")):
-				old_tag = str(img)
-				url = img.get('src')
-				m = re.search(r"(.+?)>", url).groups()
-				name = m[0]
-				if name in interlinks:
-					hi = url.replace(name+">",interlinks[name])
-					img['src'] = hi
-				content = content.replace(old_tag.replace("&gt;", ">").replace("/>",">"), str(img))
-
-		instance._content = content
+    if instance._content is not None:
+        content = instance._content
+
+        if '<a' in content:
+            text = BeautifulSoup(
+                content, "html.parser", parse_only=SoupStrainer("a"))
+            for link in text.find_all("a", href=re.compile("(.+?)>")):
+                old_tag = str(link)
+                url = link.get('href')
+                m = re.search(r"(.+?)>", url).groups()
+                name = m[0]
+                if name in interlinks:
+                    hi = url.replace(name + ">", interlinks[name])
+                    link['href'] = hi
+
+                content = content.replace(old_tag, str(link))
+
+        if '<img' in content:
+            text = BeautifulSoup(
+                content, "html.parser", parse_only=SoupStrainer("img"))
+            for img in text.find_all('img', src=re.compile("(.+?)>")):
+                old_tag = str(img)
+                url = img.get('src')
+                m = re.search(r"(.+?)>", url).groups()
+                name = m[0]
+                if name in interlinks:
+                    hi = url.replace(name+">", interlinks[name])
+                    img['src'] = hi
+                content = content.replace(
+                    old_tag.replace("&gt;", ">").replace("/>", ">"), str(img))
+
+        instance._content = content
+
 
 def register():
-	signals.generator_init.connect(getSettings)
-	signals.content_object_init.connect(parse_links)
+    signals.generator_init.connect(getSettings)
+    signals.content_object_init.connect(parse_links)
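
For reference, a minimal usage sketch of the plugin whose style is cleaned up above. The INTERLINKS setting name and the built-in "this" shortcut (mapped to SITEURL + "/") come from the code in the diff; the concrete shortcut names, URLs, and plugin-loading lines below are illustrative assumptions, not part of the commit.

# pelicanconf.py -- hypothetical settings; only INTERLINKS and the
# built-in 'this' shortcut are taken from the plugin code above.
PLUGIN_PATHS = ['pelican-plugins']   # assumed plugin checkout location
PLUGINS = ['interlinks']             # assumed plugin name

INTERLINKS = {
    'wikipedia': 'https://en.wikipedia.org/wiki/',
    'pelicandocs': 'https://docs.getpelican.com/en/latest/',
}

# In content, a link written with the keyword>rest_of_url shortcut, e.g.
#   <a href="wikipedia>Pelican">Pelican (bird)</a>
# is rewritten by parse_links() to
#   <a href="https://en.wikipedia.org/wiki/Pelican">Pelican (bird)</a>
# "this>pages/about.html" resolves against SITEURL in the same way, and
# <img src="keyword>..."> tags are handled by the same substitution.

The settings are read once at generator_init (getSettings) and the substitution runs per content object at content_object_init (parse_links), which is why register() connects the two signals in that order.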