OS X's Built-in Wi-Fi Scanner

Hold Option and click the Wi-Fi icon in the menu bar, then choose Open Wireless Diagnostics.
Next, choose Window > Scan from the menu bar to see the scan results directly.
Based on the scan you can pick the least congested channel in your router's admin interface and avoid interference on the 2.4 GHz band (802.11b/g/n).
That said, with so many APs around these days, the better option is simply a router that supports 5 GHz (802.11a/h/j/n/ac).
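
If you prefer the command line, the same scan can be run with the airport utility bundled with OS X. It is a private binary, so the path below is an assumption based on where it lived on OS X releases of this era:

# -s scans and lists nearby SSIDs with their channel and signal strength
/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport -s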


Three-Finger Drag in OS X

Since the El Capitan update, the trackpad's "three-finger drag" feature has been hidden in OS X and has to be enabled another way:
Open System Preferences and click Accessibility;
Click Mouse & Trackpad, then open Trackpad Options;
Check Enable dragging, select "three finger drag", then save and exit.
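
The same switch can also be flipped from Terminal with defaults. The two preference domains below are the ones commonly cited for this setting (treat them as assumptions; log out and back in for the change to take effect):

# built-in trackpad
defaults write com.apple.AppleMultitouchTrackpad TrackpadThreeFingerDrag -bool true
# external Bluetooth trackpad
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadThreeFingerDrag -bool true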


Zhihu Image-Download Crawler

This uses a Zhihu API that has since been changed, but it still works.
It currently runs three threads; the token is the numeric suffix of the question's URL.
Efficiency is not great.

#encoding=utf-8
import json
import os
import threading
import urllib
import urllib2

import requests
from bs4 import BeautifulSoup

mutex = threading.Lock()
Num = 0

def getMsg(token, x):
    # Fetch one batch of answers through Zhihu's (old) QuestionAnswerListV2 API.
    url = "https://www.zhihu.com/node/QuestionAnswerListV2"
    payload = {
        'method': 'next',
        # change url_token to target a different Zhihu question
        'params': '{"url_token":' + str(token) + ',"pagesize":0,"offset":' + str(x) + '}'
    }
    headers = {
        'accept': "*/*",
        'accept-language': "zh-CN,zh;q=0.8,zh-TW;q=0.6",
        'content-type': "application/x-www-form-urlencoded",
        'host': "www.zhihu.com",
        'origin': "https://www.zhihu.com",
        'referer': "https://www.zhihu.com/question/41155042",
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36",
        'x-requested-with': "XMLHttpRequest",
        'cache-control': "no-cache",
    }
    post_data = urllib.urlencode(payload)
    response = requests.post(url, data=post_data, headers=headers)
    data = json.loads(response.text)  # don't shadow the json module
    msg = data['msg']
    if len(msg) > 0:
        return msg[0]
    return "<html></html>"

def inputPage(token, page):
    # Parse one batch of answer HTML and download every image it references.
    global Num
    msg = getMsg(token, page)
    soup = BeautifulSoup(msg, 'html.parser')
    for noscript in soup.find_all('noscript'):
        for img in noscript.find_all('img'):
            src = img.get('data-original')  # full-size image URL; may be absent
            if not src:
                continue
            print src
            content = urllib2.urlopen(src).read()
            # hold the lock across the file write and the counter update together
            mutex.acquire()
            try:
                f = open("./test/" + str(Num) + '.jpg', 'wb')  # binary write, not 'a+'
                f.write(content)
                f.close()
                print Num
                Num = Num + 1
            finally:
                mutex.release()

class myThread(threading.Thread):
    def __init__(self, token, star, end):
        threading.Thread.__init__(self)
        self.token = token
        self.star = star
        self.end = end

    def run(self):
        while self.star < self.end:
            inputPage(self.token, self.star)
            self.star = self.star + 1
            print str(self.star) + ' pages downloaded'

token = 23252018
if not os.path.isdir('./test'):
    os.makedirs('./test')  # downloaded images land in ./test/
# split the offsets across three threads: [0,30), [30,50), [50,60)
thread1 = myThread(token, 0, 30)
thread2 = myThread(token, 30, 50)
thread3 = myThread(token, 50, 60)
thread1.start()
thread2.start()
thread3.start()


Installing and Configuring PHP5 + MySQL + Apache on Linux with apt-get

Install MySQL 5:

sudo apt-get install mysql-server
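
The excerpt shows only the MySQL step; on a Debian/Ubuntu release of the php5 era, Apache and PHP5 with its MySQL binding install along the same lines (package names are the usual ones for those releases, so treat this as a sketch):

sudo apt-get install apache2
sudo apt-get install php5 libapache2-mod-php5 php5-mysql
sudo service apache2 restart   # reload Apache so mod_php is picked up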


Serving HTTPS with Apache 2.4.7

Create a cert directory:

mkdir /etc/apache2/cert
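
A typical continuation, sketched here rather than quoted from the full post, is to generate a self-signed certificate into that directory and enable Apache's SSL site:

# self-signed certificate, valid for one year
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
    -keyout /etc/apache2/cert/apache.key \
    -out /etc/apache2/cert/apache.crt
sudo a2enmod ssl
sudo a2ensite default-ssl   # point its SSLCertificateFile/SSLCertificateKeyFile at the files above
sudo service apache2 restart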


Python Code for Sending Mail Through QQ Mail

QQ Mail requires logging in over SSL, so the code differs a bit from ordinary mail-sending code.

s = smtplib.SMTP_SSL("smtp.qq.com", 465)
s.login(_user, _pwd)
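
For context, a minimal complete send built around those two lines might look like the following. The _user/_pwd names follow the excerpt, the recipient is a placeholder, and the password must be the SMTP authorization code from QQ Mail's settings rather than the account password:

#encoding=utf-8
import smtplib
from email.mime.text import MIMEText
from email.header import Header

_user = "xxx@qq.com"           # sender address (placeholder)
_pwd = "xxxx"                  # QQ Mail SMTP authorization code (placeholder)
receiver = "yyy@example.com"   # placeholder recipient

msg = MIMEText('Hello from Python', 'plain', 'utf-8')
msg['Subject'] = Header('Test', 'utf-8')
msg['From'] = _user
msg['To'] = receiver

s = smtplib.SMTP_SSL("smtp.qq.com", 465)   # SSL from the start, as in the excerpt
s.login(_user, _pwd)
s.sendmail(_user, [receiver], msg.as_string())
s.quit()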


Python Email-Sending Module

#encoding=utf-8
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.utils import formataddr
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart

def Email_163(receiver, subject, text, model):
    Sender = "xxx"               # sender email address
    AuthorizationCode = 'xxxx'   # authorization code granted when enabling SMTP on the 163 account
    smtp_server = 'xx'           # SMTP server address
    # a plain HTML message would look like:
    # msg = MIMEText('<html><body><h1>Hello</h1>'
    #                '<p>send by <a href="http://www.python.org">Python</a>...</p>'
    #                '</body></html>', 'html', 'utf-8')
    msgRoot = MIMEMultipart('related')
    msgRoot['Subject'] = Header(subject, 'utf-8')   # mail subject
    msgRoot['From'] = formataddr(['xx', Sender])    # sender, as shown in the recipient's client
    msgRoot['To'] = formataddr(['', receiver])      # recipient
    msgAlternative = MIMEMultipart('alternative')
    msgRoot.attach(msgAlternative)
    msgAlternative.attach(MIMEText(text, model, 'utf-8'))
    # attach an inline image that the HTML body references as cid:image1
    fp = open('test.jpg', 'rb')
    msgImage = MIMEImage(fp.read())
    fp.close()
    msgImage.add_header('Content-ID', '<image1>')
    msgRoot.attach(msgImage)
    try:
        smtpObj = smtplib.SMTP()
        smtpObj.connect(smtp_server, 25)           # 25 is the plain SMTP port
        smtpObj.login(Sender, AuthorizationCode)   # log in to the mailbox
        smtpObj.sendmail(Sender, receiver, msgRoot.as_string())   # send the mail
        smtpObj.quit()                             # quit
        print u"sent successfully"
    except smtplib.SMTPException, e:
        print u"sending failed"
        print e

Email_163("xxx", 'Hello', '''<html>
<body>
Just a test<br>
Hi everyone<br>
<img src="cid:image1">
</body>
</html>''', 'html')


PHP MD5 Hashing

md5($str)

The md5(string, raw) function is all you need to compute an MD5 hash.
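
For example (the digest values shown are the standard MD5 results):

<?php
echo md5("hello");          // 5d41402abc4b2a76b9719d911017c592
$raw = md5("hello", true);  // pass true for a raw 16-byte binary digest
echo bin2hex($raw);         // same hex string as above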


Lianjia Housing Crawler

Uses the xlwt library to write the scraped content to an Excel file.

#coding=UTF-8
import sys
import urllib2
import xlwt
from bs4 import BeautifulSoup

reload(sys)
sys.setdefaultencoding('utf-8')  # Python 2 workaround for mixed str/unicode

page = 1
num = 0
book = xlwt.Workbook(encoding="utf-8", style_compression=0)
sheet = book.add_sheet('test', cell_overwrite_ok=True)
while page < 100:
    url = "http://ty.fangjia.com/zufang/--e-" + str(page) + "#pagelist"
    html = urllib2.urlopen(url)
    soup = BeautifulSoup(html, "html.parser")
    house = soup.find("div", class_="house")
    for home in house.find_all("li", {"name": "__page_click_area"}):
        # column 0: listing title
        for title in home.find_all("span", class_="tit"):
            for text in title.stripped_strings:
                sheet.write(num + 1, 0, text)
        # columns 1-2: address fragments
        for address in home.find_all("span", class_="address"):
            i = 1
            for text in address.stripped_strings:
                sheet.write(num + 1, i, text)
                i = i + 1
        # column 3: attributes (layout, area, ...)
        for attribute in home.find_all("span", class_="attribute"):
            for text in attribute.stripped_strings:
                sheet.write(num + 1, 3, text)
        # column 4: price, with the 万 (10,000 CNY) unit appended
        for price in home.find_all("span", class_="xq_aprice xq_esf_width"):
            for text in price.em.stripped_strings:
                sheet.write(num + 1, 4, text + "万")
        num = num + 1
    print str(page) + " pages written"
    book.save('/Users/dubo/Desktop/typrice2.xls')  # save after every page as a checkpoint
    page = page + 1
print "Total: " + str(num) + " listings"


Douban Books Top 250

#coding=UTF-8
import urllib2
from bs4 import BeautifulSoup

# list pages: https://book.douban.com/top250?start=0,25,...,225
offset = 0
count = 0
while offset <= 225:
    url = "https://book.douban.com/top250?start=" + str(offset)
    content = urllib2.urlopen(url).read()
    soup = BeautifulSoup(content, "html.parser")
    print "----------page=" + str(offset / 25 + 1) + "-----------"
    for link in soup.find_all('div', {"class": "pl2"}):
        for a in link.find_all("a"):
            for title in a.stripped_strings:
                print title   # book title (and subtitle, if any)
                count = count + 1
    offset = offset + 25
print count   # total number of titles printed
