What I'm basically trying to do is use flask_uploads to find the path for an uploaded photo. I'm getting 'RuntimeError: no destination for set images' whenever I run the code. I've been over about 10 different tutorials and have gone over the code about 50 times. Please, for my sanity, help me out.
Here's my code:
from colorthief import ColorThief
import matplotlib.pyplot as plt
from flask_uploads import configure_uploads, IMAGES, UploadSet
from flask import Flask, render_template, redirect, url_for, request
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, FileField
import os
class InsertPic(FlaskForm):
    image = FileField('Select Your Picture')

URL = r'D:\Python Porfolio\Colors\static\images'

app = Flask(__name__)
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'
app.config['UPLOADED_PHOTOS_DEST'] = 'static/images'
Bootstrap(app)

images = UploadSet('images', IMAGES)
configure_uploads(app, images)
@app.route('/', methods=['GET', 'POST'])
def index():
    form = InsertPic()
    if form.validate_on_submit():
        filename = images.save(form.image.data)
        file_url = images.url(filename)
        ct = ColorThief(f"{file_url}")
        colors = ct.get_palette(color_count=11)
        plt.imshow([[colors[a] for a in range(10)]])
        plt.axis('off')
        plt.savefig("output.jpg", bbox_inches='tight', pad_inches=0)
        # Convert to HEX values
        separate_colors = []
        for color in colors:
            a = f"#{color[0]:02x}{color[1]:02x}{color[2]:02x}"
            separate_colors.append(a)
        return render_template('colors.html', colors=separate_colors)
    return render_template('index.html', form=form)

if __name__ == "__main__":
    app.run(debug=True)
Here's my Traceback info:
File "D:\prjects\pythonProject3\main.py", line 23, in <module>
configure_uploads(app, images)
File "D:\prjects\pythonProject3\venv\lib\site-packages\flask_uploads\flask_uploads.py", line 122, in configure_uploads
config = config_for_set(uset, app, defaults)
File "D:\prjects\pythonProject3\venv\lib\site-packages\flask_uploads\flask_uploads.py", line 84, in config_for_set
raise RuntimeError("no destination for set %s" % uset.name)
RuntimeError: no destination for set images
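For what it's worth, flask_uploads derives the destination config key from the upload set's name (UPLOADED_<SETNAME>_DEST), so a set named 'images' looks for UPLOADED_IMAGES_DEST, and the UPLOADED_PHOTOS_DEST entry above is never consulted. A minimal sketch of a matching configuration:

app.config['UPLOADED_IMAGES_DEST'] = 'static/images'  # key must match the set name 'images'
images = UploadSet('images', IMAGES)
configure_uploads(app, images)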
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import random
from io import open
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
from collections import Counter
from concurrent.futures import ProcessPoolExecutor
import logging

from deepwalk import graph
from deepwalk import walks as serialized_walks
from walks import WalksCorpus
from gensim.models import Word2Vec
from deepwalk.skipgram import Skipgram

from six import text_type as unicode
from six import iteritems
from six.moves import range

import psutil
from multiprocessing import cpu_count

p = psutil.Process(os.getpid())
try:
    p.set_cpu_affinity(list(range(cpu_count())))
except AttributeError:
    try:
        p.cpu_affinity(list(range(cpu_count())))
    except AttributeError:
        pass

logger = logging.getLogger(__name__)
LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"


def debug(type_, value, tb):
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        sys.__excepthook__(type_, value, tb)
    else:
        import traceback
        import pdb
        traceback.print_exception(type_, value, tb)
        print(u"\n")
        pdb.pm()


def process(args):
    if args.format == "adjlist":
        G = graph.load_adjacencylist(args.input, undirected=args.undirected)
    elif args.format == "edgelist":
        G = graph.load_edgelist(args.input, undirected=args.undirected)
    elif args.format == "mat":
        G = graph.load_matfile(args.input, variable_name=args.matfile_variable_name, undirected=args.undirected)
    else:
        raise Exception("Unknown file format: '%s'. Valid formats: 'adjlist', 'edgelist', 'mat'" % args.format)

    print("Number of nodes: {}".format(len(G.nodes())))
    num_walks = len(G.nodes()) * args.number_walks
    print("Number of walks: {}".format(num_walks))
    data_size = num_walks * args.walk_length
    print("Data size (walks*length): {}".format(data_size))

    if data_size < args.max_memory_data_size:
        print("Walking...")
        walks = graph.build_deepwalk_corpus(G, num_paths=args.number_walks,
                                            path_length=args.walk_length, alpha=0, rand=random.Random(args.seed))
        print("Training...")
        model = Word2Vec(walks, size=args.representation_size, window=args.window_size, min_count=0, sg=1, hs=1, workers=args.workers)
    else:
        print("Data size {} is larger than limit (max-memory-data-size: {}). Dumping walks to disk.".format(data_size, args.max_memory_data_size))
        print("Walking...")
        walks_filebase = args.output + ".walks"
        walk_files = serialized_walks.write_walks_to_disk(G, walks_filebase, num_paths=args.number_walks,
                                                          path_length=args.walk_length, alpha=0, rand=random.Random(args.seed),
                                                          num_workers=args.workers)
        print("Counting vertex frequency...")
        if not args.vertex_freq_degree:
            vertex_counts = serialized_walks.count_textfiles(walk_files, args.workers)
        else:
            # use degree distribution for frequency in tree
            vertex_counts = G.degree(nodes=G.iterkeys())

        print("Training...")
        walks_corpus = serialized_walks.WalksCorpus(walk_files)
        model = Skipgram(sentences=walks_corpus, vocabulary_counts=vertex_counts,
                         size=args.representation_size,
                         window=args.window_size, min_count=0, trim_rule=None, workers=args.workers)

    model.wv.save_word2vec_format(args.output)


def main():
    parser = ArgumentParser("deepwalk",
                            formatter_class=ArgumentDefaultsHelpFormatter,
                            conflict_handler='resolve')
    parser.add_argument("--debug", dest="debug", action='store_true', default=False,
                        help="drop a debugger if an exception is raised.")
    parser.add_argument('--format', default='adjlist',
                        help='File format of input file')
    parser.add_argument('--input', nargs='?', required=True,
                        help='Input graph file')
    parser.add_argument("-l", "--log", dest="log", default="INFO",
                        help="log verbosity level")
    parser.add_argument('--matfile-variable-name', default='network',
                        help='variable name of adjacency matrix inside a .mat file.')
    parser.add_argument('--max-memory-data-size', default=1000000000, type=int,
                        help='Size to start dumping walks to disk, instead of keeping them in memory.')
    parser.add_argument('--number-walks', default=10, type=int,
                        help='Number of random walks to start at each node')
    parser.add_argument('--output', required=True,
                        help='Output representation file')
    parser.add_argument('--representation-size', default=64, type=int,
                        help='Number of latent dimensions to learn for each node.')
    parser.add_argument('--seed', default=0, type=int,
                        help='Seed for random walk generator.')
    parser.add_argument('--undirected', default=True, type=bool,
                        help='Treat graph as undirected.')
    parser.add_argument('--vertex-freq-degree', default=False, action='store_true',
                        help='Use vertex degree to estimate the frequency of nodes '
                             'in the random walks. This option is faster than '
                             'calculating the vocabulary.')
    parser.add_argument('--walk-length', default=40, type=int,
                        help='Length of the random walk started at each node')
    parser.add_argument('--window-size', default=5, type=int,
                        help='Window size of skipgram model.')
    parser.add_argument('--workers', default=1, type=int,
                        help='Number of parallel processes.')

    args = parser.parse_args()

    numeric_level = getattr(logging, args.log.upper(), None)
    logging.basicConfig(format=LOGFORMAT)
    logger.setLevel(numeric_level)

    if args.debug:
        sys.excepthook = debug

    process(args)


if __name__ == "__main__":
    sys.exit(main())
Error:
Traceback (most recent call last):
  File "main.py", line 165, in <module>
    sys.exit(main())
  File "main.py", line 162, in main
    process(args)
  File "main.py", line 93, in process
    walks_corpus = serialized_walks.WalksCorpus(walk_files)
AttributeError: 'module' object has no attribute 'WalksCorpus'
Why do I get this error?
It looks as though you are importing WalksCorpus on its own with from walks import WalksCorpus. Then, when you try to use WalksCorpus, you look it up inside serialized_walks, which (I assume) does not define WalksCorpus.
Try changing this line.
walks_corpus = serialized_walks.WalksCorpus(walk_files)
To:
walks_corpus = WalksCorpus(walk_files)
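As a version-dependent side note (not part of the original answer): newer releases of deepwalk define a WalksCorpus class inside the package's own walks module, so upgrading deepwalk and importing from the package should also make the lookup resolve:

from deepwalk.walks import WalksCorpus  # assumes a deepwalk release that ships WalksCorpus
walks_corpus = WalksCorpus(walk_files)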
I am new to Scrapy and trying to crawl a couple of links as a test. Whenever I run scrapy crawl tier1, I get "TypeError: object() takes no parameters", as follows:
Traceback (most recent call last):
  File "/Users/btaek/TaeksProgramming/adv/crawler/lib/python2.7/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "/Users/btaek/TaeksProgramming/adv/crawler/adv_crawler/adv_crawler/spiders/tier1_crawler.py", line 93, in parse
    mk_loader.add_xpath('title', 'h1[@class="top_title"]') # Title of the article
  File "/Users/btaek/TaeksProgramming/adv/crawler/lib/python2.7/site-packages/scrapy/loader/__init__.py", line 167, in add_xpath
    self.add_value(field_name, values, *processors, **kw)
  File "/Users/btaek/TaeksProgramming/adv/crawler/lib/python2.7/site-packages/scrapy/loader/__init__.py", line 77, in add_value
    self._add_value(field_name, value)
  File "/Users/btaek/TaeksProgramming/adv/crawler/lib/python2.7/site-packages/scrapy/loader/__init__.py", line 91, in _add_value
    processed_value = self._process_input_value(field_name, value)
  File "/Users/btaek/TaeksProgramming/adv/crawler/lib/python2.7/site-packages/scrapy/loader/__init__.py", line 150, in _process_input_value
    return proc(value)
  File "/Users/btaek/TaeksProgramming/adv/crawler/lib/python2.7/site-packages/scrapy/loader/processors.py", line 28, in __call__
    next_values += arg_to_iter(func(v))
TypeError: object() takes no parameters

2017-08-23 17:25:02 [tier1-parse-logger] INFO: Entered the parse function to parse and index: http://news.mk.co.kr/newsRead.php?sc=30000001&year=2017&no=535166
2017-08-23 17:25:02 [tier1-parse-logger] ERROR: Error (object() takes no parameters) when trying to parse <<date>> from a mk article: http://news.mk.co.kr/newsRead.php?sc=30000001&year=2017&no=535166
2017-08-23 17:25:02 [tier1-parse-logger] ERROR: Error (object() takes no parameters) when trying to parse <<author>> from a mk article: http://news.mk.co.kr/newsRead.php?sc=30000001&year=2017&no=535166
2017-08-23 17:25:02 [scrapy.core.scraper] ERROR: Spider error processing <GET http://news.mk.co.kr/newsRead.php?sc=30000001&year=2017&no=535166> (referer: None)
Traceback (most recent call last):
  ... (identical traceback to the one above) ...
TypeError: object() takes no parameters
And, my spider file (tier1_crawler.py):
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

import os
sys.path.append(os.path.abspath('..'))

import logging
import scrapy
from scrapy.loader import ItemLoader
from adv_crawler.items import AdvCrawlerItem
from datetime import datetime, date, time

t1_parse_logger = logging.getLogger("tier1-parse-logger")
t1_parse_logger.LOG_FILE = "Tier1-log.txt"

content_type_dic = {
    'news': 'news',
}


class Tier1Crawler(scrapy.Spider):
    name = "tier1"

    def start_requests(self):
        urls = ['http://news.mk.co.kr/newsRead.php?sc=30000001&year=2017&no=535982',
                'http://news.mk.co.kr/newsRead.php?sc=30000001&year=2017&no=535166',
                ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        t1_parse_logger.info("Entered the parse function to parse and index: %s" % response.url)  # Log at the beginning of the parse function
        item_loader = ItemLoader(item=AdvCrawlerItem(), response=response)
        if 'mk.co.kr' in response.url:
            mk_loader = item_loader.nested_xpath('//div[@id="top_header"]/div[@class="news_title"]/div[@class="news_title_text"]')
            try:
                mk_loader.add_xpath('date', 'div[@class="news_title_author"]/ul/li[@class="lasttime"]')
            except AttributeError:  # if the date is not in the "lasttime" li tag
                mk_loader.add_xpath('date', 'div[@class="news_title_author"]/ul/li[@class="lasttime1"]')
            except Exception as e:  # in case the error is not AttributeError
                t1_parse_logger.error("Error "+"("+str(e)+")"+" when trying to parse <<date>> from a mk article: %s" % response.url)

            try:
                mk_loader.add_xpath('author', 'div[@class="news_title_author"]/ul/li[@class="author"]')
            except AttributeError:  # in case there is no author (some mk articles have no author)
                item_loader.add_value('author', "None")  # if error, replace with the line below
                # item['author'] = "None"  # if the above gives any error, replace the above with this line
            except Exception as e:  # in case the error is not AttributeError
                t1_parse_logger.error("Error "+"("+str(e)+")"+" when trying to parse <<author>> from a mk article: %s" % response.url)

            item_loader.add_xpath('content', '//div[@id="Content"]/div[@class="left_content"]/div[@id="article_body"]/div[@class="art_txt"]')  # Content of the article (entire contents)
            mk_loader.add_xpath('title', 'h1[@class="top_title"]')  # Title of the article
            item_loader.add_value('content_type', content_type_dic['news'])
            item_loader.add_value('timestamp', str(datetime.now()))  # timestamp of when the document is being indexed
            item_loader.add_value('url', response.url)  # url of the article

        t1_parse_logger.info("Parsed and indexed: %s" % response.url)
        return item_loader.load_item()
And, my items.py file:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from w3lib.html import remove_tags


def filter_date(value):
    if isinstance(value, unicode):
        (year, month, day) = str(value.split(" ")[-2]).split(".")
        return year + "-" + month + "-" + day


def filter_utf(value):
    if isinstance(value, unicode):
        return value.encode('utf-8')


class AdvCrawlerItem(scrapy.Item):
    author = scrapy.Field(input_processor=MapCompose(remove_tags, TakeFirst, filter_utf),)  # Name of the publisher/author
    content = scrapy.Field(input_processor=MapCompose(remove_tags, Join, filter_utf),)  # Content of the article (entire contents)
    content_type = scrapy.Field()
    date = scrapy.Field(input_processor=MapCompose(remove_tags, TakeFirst, filter_date),)
    timestamp = scrapy.Field()  # timestamp of when the document is being indexed
    title = scrapy.Field(input_processor=MapCompose(remove_tags, TakeFirst, filter_utf),)  # title of the article
    url = scrapy.Field()  # url of the article
And, pipelines.py file:
import json

from scrapy import signals
from scrapy.exporters import JsonLinesItemExporter


class AdvCrawlerJsonExportPipeline(object):

    def open_spider(self, spider):
        self.file = open('crawled-articles1.txt', 'w')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + "\n"
        self.file.write(line)
        return item
I am aware that the "TypeError: object() takes no parameters" error is usually thrown when a class's __init__ method is either not defined at all or not defined to take parameters.
However, in the case above, how can I fix the error? Am I doing something wrong with the item loader or the nested item loader?
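For illustration, a minimal reproduction of that message under Python 2 (a hypothetical snippet, not from the project): instantiating a class that defines no __init__ while passing it an argument fails the same way, which is exactly what happens when MapCompose calls the TakeFirst class itself on a value.

class Foo(object):
    pass

Foo(42)  # TypeError: object() takes no parameters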
When using Scrapy processors, you should instantiate the processor classes; it is the instances that do the processing:

# wrong
field = Field(output_processor=MapCompose(TakeFirst))

# right
field = Field(output_processor=MapCompose(TakeFirst()))
                                                   ^^
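Applied to the items.py above, one possible rewrite of the affected fields (a sketch; it also moves TakeFirst and Join to output_processor, where they are normally used, since both operate on the whole list of extracted values rather than on single values):

author = scrapy.Field(input_processor=MapCompose(remove_tags, filter_utf),
                      output_processor=TakeFirst())
content = scrapy.Field(input_processor=MapCompose(remove_tags, filter_utf),
                       output_processor=Join())
date = scrapy.Field(input_processor=MapCompose(remove_tags, filter_date),
                    output_processor=TakeFirst())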
I want to fit a model in Python 3.5 (numpy 1.11.2, sklearn 0.18.1):
import pickle
from os.path import join

import numpy as np
from sklearn.tree import DecisionTreeRegressor

clf = DecisionTreeRegressor()
X = np.array([[1, 2, 3, 4], [1, 1, 2, 2], [1, 2, 1, 2]]).T
y = [1, 1, 0, 0]
clf.fit(X, y)

with open(join(path_to_data, 'models', 'debug.model'), 'wb') as f:
    pickle.dump(clf, f, protocol=2)
After pickling, I try to unpickle the model in Python 2.7 (numpy 1.11.2, sklearn 0.18.1):
import pickle
from os.path import join

with open(join(path_to_data, 'models', 'debug.model'), 'rb') as f:
    clf = pickle.load(f)
but it raises an error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-78-2eaf35b8e6d9> in <module>()
----> 1 joblib.load(join(path_to_data,'models','queryforest_debug.model'))
/home/iiivanitskiy/.local/lib/python2.7/site-packages/sklearn/externals/joblib/numpy_pickle.pyc in load(filename, mmap_mode)
573 return load_compatibility(fobj)
574
--> 575 obj = _unpickle(fobj, filename, mmap_mode)
576
577 return obj
/home/iiivanitskiy/.local/lib/python2.7/site-packages/sklearn/externals/joblib/numpy_pickle.pyc in _unpickle(fobj, filename, mmap_mode)
505 obj = None
506 try:
--> 507 obj = unpickler.load()
508 if unpickler.compat_mode:
509 warnings.warn("The file '%s' has been generated with a "
/usr/lib/python2.7/pickle.pyc in load(self)
856 while 1:
857 key = read(1)
--> 858 dispatch[key](self)
859 except _Stop, stopinst:
860 return stopinst.value
/home/iiivanitskiy/.local/lib/python2.7/site-packages/sklearn/externals/joblib/numpy_pickle.pyc in load_build(self)
325 NDArrayWrapper is used for backward compatibility with joblib <= 0.9.
326 """
--> 327 Unpickler.load_build(self)
328
329 # For backward compatibility, we support NDArrayWrapper objects.
/usr/lib/python2.7/pickle.pyc in load_build(self)
1215 setstate = getattr(inst, "__setstate__", None)
1216 if setstate:
-> 1217 setstate(state)
1218 return
1219 slotstate = None
ValueError: non-string names in Numpy dtype unpickling
Is there a way to unpickle in Python 2 a DecisionTreeRegressor that was pickled in Python 3?
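One hedged workaround, assuming retraining under Python 2 is acceptable: persist the raw training data rather than the fitted estimator, since .npy files load cleanly on both Python versions (the file names below are illustrative):

import numpy as np
from sklearn.tree import DecisionTreeRegressor

# Under Python 3: save the data, which is portable, instead of the model.
np.save('X.npy', X)
np.save('y.npy', np.asarray(y))

# Under Python 2: reload the data and refit an identical estimator.
X = np.load('X.npy')
y = np.load('y.npy')
clf = DecisionTreeRegressor().fit(X, y)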
Before I get the obvious response about checking the database itself, I'll start by saying that I've already checked out this post, which has a nearly identical setup to mine; the solutions of deleting the database and migrations and of adding a default value in the table did not work, as expected. I do, however, expect the solution to be quite simple.
So, that said, I'm doing the tutorial for django-rest-framework and my problem started on part 4. The tutorial says as follows:
Now if you open up the browser again and refresh the page you'll see a 'Login' link in the top right of the page. If you log in as one of the users you created earlier, you'll be able to create code snippets again. Once you've created a few code snippets, navigate to the '/users/' endpoint, and notice that the representation includes a list of the snippet pks that are associated with each user, in each user's 'snippets' field.
So, I tried to create the "snippets" objects with the manage.py shell, as in the first part of the tutorial, with the following code:
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
snippet = Snippet(code='foo = "bar"\n')
snippet.save()
And that's where it ends: .save() triggers the error, for which I've printed the traceback below.
Using the advice from the previously answered question, I've very slightly changed my setup, but I'm still getting the error. Here's the setup:
models.py:
from django.db import models
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter
from pygments import highlight

LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())


class Snippet(models.Model):
    owner = models.ForeignKey('auth.User', related_name='snippets')
    highlighted = models.TextField(default='')
    created = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100, blank=True, default='')
    code = models.TextField()
    linenos = models.BooleanField(default=False)
    language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
    style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)

    class Meta:
        ordering = ('created',)

    def save(self, *args, **kwargs):
        lexer = get_lexer_by_name(self.language)
        linenos = self.linenos and 'table' or False
        options = self.title and {'title': self.title} or {}
        formatter = HtmlFormatter(style=self.style, linenos=linenos,
                                  full=True, **options)
        self.highlighted = highlight(self.code, lexer, formatter)
        super(Snippet, self).save(*args, **kwargs)
serializers.py:
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User


class SnippetSerializer(serializers.ModelSerializer):
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Snippet
        fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')


class UserSerializer(serializers.ModelSerializer):
    snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())

    class Meta:
        model = User
        fields = ('id', 'username', 'snippets')
views.py:
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer, UserSerializer
from rest_framework import generics
from django.contrib.auth.models import User
from rest_framework import permissions


class SnippetList(generics.ListCreateAPIView):
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def perform_create(self, serializer):
        serializer.save(owner=self.request.user)


class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


class UserList(generics.ListAPIView):
    queryset = User.objects.all()
    serializer_class = UserSerializer


class UserDetail(generics.RetrieveAPIView):
    queryset = User.objects.all()
    serializer_class = UserSerializer
/snippets/urls.py
from django.conf.urls import url, include
from snippets.views import SnippetList, SnippetDetail, UserList, UserDetail
from rest_framework.urlpatterns import format_suffix_patterns

urlpatterns = [
    url(r'^snippets/$', SnippetList.as_view()),
    url(r'^snippets/(?P<pk>[0-9]+)/$', SnippetDetail.as_view()),
    url(r'^users/$', UserList.as_view()),
    url(r'^users/(?P<pk>[0-9]+)/$', UserDetail.as_view()),
]

urlpatterns = format_suffix_patterns(urlpatterns)

urlpatterns += [
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
]
urls.py:
from django.conf.urls import url, include

urlpatterns = [
    url(r'^', include('snippets.urls')),
]
and, lastly, the ugly traceback:
In [6]: snippet.save()
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\backends\utils.py in execute(self, sql, params)
63 else:
---> 64 return self.cursor.execute(sql, params)
65
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\backends\sqlite3\base.py in execute(self, query, params)
322 query = self.convert_query(query)
--> 323 return Database.Cursor.execute(self, query, params)
324
OperationalError: table snippets_snippet has no column named owner_id
The above exception was the direct cause of the following exception:
OperationalError Traceback (most recent call last)
<ipython-input-6-fe28bd3dc796> in <module>()
----> 1 snippet.save()
D:\GitHub Repositories\Django\tutorial\snippets\models.py in save(self, *args, **kwargs)
34 full=True, **options)
35 self.highlighted = highlight(self.code, lexer, formatter)
---> 36 super(Snippet, self).save(*args, **kwargs)
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\models\base.py in save(self, force_insert, force_update, using, update_fields)
698
699 self.save_base(using=using, force_insert=force_insert,
--> 700 force_update=force_update, update_fields=update_fields)
701 save.alters_data = True
702
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\models\base.py in save_base(self, raw, force_insert, force_update, using, update_fields)
726 if not raw:
727 self._save_parents(cls, using, update_fields)
--> 728 updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
729 # Store the database on which the object was saved
730 self._state.db = using
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\models\base.py in _save_table(self, raw, cls, force_insert, force_update, using, update_fields)
810
811 update_pk = bool(meta.has_auto_field and not pk_set)
--> 812 result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
813 if update_pk:
814 setattr(self, meta.pk.attname, result)
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\models\base.py in _do_insert(self, manager, using, fields, update_pk, raw)
849 """
850 return manager._insert([self], fields=fields, return_id=update_pk,
--> 851 using=using, raw=raw)
852
853 def delete(self, using=None, keep_parents=False):
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\models\manager.py in manager_method(self, *args, **kwargs)
120 def create_method(name, method):
121 def manager_method(self, *args, **kwargs):
--> 122 return getattr(self.get_queryset(), name)(*args, **kwargs)
123 manager_method.__name__ = method.__name__
124 manager_method.__doc__ = method.__doc__
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\models\query.py in _insert(self, objs, fields, return_id, raw, using)
1037 query = sql.InsertQuery(self.model)
1038 query.insert_values(fields, objs, raw=raw)
-> 1039 return query.get_compiler(using=using).execute_sql(return_id)
1040 _insert.alters_data = True
1041 _insert.queryset_only = False
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\models\sql\compiler.py in execute_sql(self, return_id)
1058 with self.connection.cursor() as cursor:
1059 for sql, params in self.as_sql():
-> 1060 cursor.execute(sql, params)
1061 if not (return_id and cursor):
1062 return
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\backends\utils.py in execute(self, sql, params)
77 start = time()
78 try:
---> 79 return super(CursorDebugWrapper, self).execute(sql, params)
80 finally:
81 stop = time()
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\backends\utils.py in execute(self, sql, params)
62 return self.cursor.execute(sql)
63 else:
---> 64 return self.cursor.execute(sql, params)
65
66 def executemany(self, sql, param_list):
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\utils.py in __exit__(self, exc_type, exc_value, traceback)
93 if dj_exc_type not in (DataError, IntegrityError):
94 self.wrapper.errors_occurred = True
---> 95 six.reraise(dj_exc_type, dj_exc_value, traceback)
96
97 def __call__(self, func):
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\utils\six.py in reraise(tp, value, tb)
683 value = tp()
684 if value.__traceback__ is not tb:
--> 685 raise value.with_traceback(tb)
686 raise value
687
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\backends\utils.py in execute(self, sql, params)
62 return self.cursor.execute(sql)
63 else:
---> 64 return self.cursor.execute(sql, params)
65
66 def executemany(self, sql, param_list):
C:\Users\Jordon\AppData\Local\Programs\Python\Python35\lib\site-packages\django\db\backends\sqlite3\base.py in execute(self, query, params)
321 return Database.Cursor.execute(self, query)
322 query = self.convert_query(query)
--> 323 return Database.Cursor.execute(self, query, params)
324
325 def executemany(self, query, param_list):
OperationalError: table snippets_snippet has no column named owner_id
First of all, you have not provided a value for 'owner' while saving a Snippet object. You need to do something like this:
from django.contrib.auth.models import User
new_user = User.objects.create(...)
snippet = Snippet(owner=new_user, code='foo = "bar"\n')
snippet.save()
Nevertheless, it doesn't explain why the owner_id column was not created. Can you change the model to something like this and see if it detects the owner column?
from django.contrib.auth.models import User

class Snippet(models.Model):
    owner = models.ForeignKey(User, related_name='snippets')
    ...
Then run these steps to attempt to create the column.
python manage.py makemigrations snippets
python manage.py migrate
It looks like you didn't delete the original database.
The tutorial removes it after the model is changed:
rm -f tmp.db db.sqlite3
rm -r snippets/migrations
python manage.py makemigrations snippets
python manage.py migrate
If you removed neither tmp.db nor db.sqlite3, Django may think it has already applied the migration and will not redo it.
Make sure you find and remove those files, then run the commands above (plus createsuperuser so you can log in).
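For reference, the tutorial's full reset sequence with the createsuperuser step appended (run from the project root; this assumes the default tutorial database files):

rm -f tmp.db db.sqlite3
rm -r snippets/migrations
python manage.py makemigrations snippets
python manage.py migrate
python manage.py createsuperuser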