Collage

n. A piece of art made by sticking various different materials together; aka PHENOMENA Magazine
Department
programming

from turtle import Turtle, Screen

screen = Screen()
screen.bgcolor("lightgreen")

turtle1 = Turtle(shape='turtle')
turtle1.color('red')
turtle1.speed("slow")  # = 3
turtle1.penup()

turtle2 = Turtle(shape='arrow')
turtle2.color('blue')
turtle2.speed(4)  # "slow" (3) < 4 < "normal" (6)
turtle2.penup()

# user input function
perimeter = screen.numinput("Track Perimeter", "Please enter the perimeter:",
                            default=2000, minval=500, maxval=3000)

def full_track_crawl(turtle, shortside, longside):
    speed = turtle.speed()
    turtle.pendown()
    for j in range(2):
        for i in range(0, int(shortside), speed):
            turtle.forward(speed)
            yield 0
        turtle.left(90)
        for i in range(0, int(longside), speed):
            turtle.forward(speed)
            yield 0
        turtle.left(90)
    turtle.penup()

# set the track
def drawTrack(perimeter, ratio):
    shortside = (perimeter / 2.0) / (ratio + 1)
    longside = ratio * shortside
    screen.setup(shortside * 2 + 60, longside + 40)
    turtle1.setposition(-shortside - 10, -longside / 2)
    turtle2.setposition(10, -longside / 2)
    generator1 = full_track_crawl(turtle1, shortside, longside)
    generator2 = full_track_crawl(turtle2, shortside, longside)
    while next(generator1, 1) + next(generator2, 1) < 2:
        pass

drawTrack(perimeter, 2)
screen.exitonclick()

Result:

Ref.
How to move multiple turtles at the same time?
https://stackoverflow.com/questions/40050438/how-to-move-multiple-turtles-at-the-same-time
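A note on the technique above: the two turtles appear to move at the same time because each walk is written as a generator that yields after every small step, and the driver loop advances both generators alternately with next(). A minimal standalone sketch of that round-robin pattern (hypothetical names and step counts, not part of the racing code itself):

def crawl(name, steps):
    # yield once per small movement so another crawler can be interleaved
    for i in range(steps):
        print(name, "step", i)
        yield 0

a = crawl("turtle1", 3)
b = crawl("turtle2", 3)
# next(gen, 1) returns 1 once a generator is exhausted,
# so the loop ends when both crawlers have finished (1 + 1 == 2)
while next(a, 1) + next(b, 1) < 2:
    pass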
John Doe · May 21, 2023, 9:56 p.m.
A quick-and-dirty game I hacked together, lol.

import turtle as t
import random as r
import tkinter as tk
from tkinter import messagebox

def nemo(w, h, c):
    # draw a filled rectangle of width w and height h in color c
    t.fillcolor(c)
    t.begin_fill()
    t.fd(w)
    t.rt(90)
    t.fd(h)
    t.rt(90)
    t.fd(w)
    t.rt(90)
    t.fd(h)
    t.end_fill()

def button(x, y, b_c, msg):
    t.pu()
    t.goto(x, y)
    t.seth(0)
    nemo(b_w, b_h, b_c)
    t.goto(x + b_w/2, y - b_h)
    t.pencolor('white')
    t.write(msg, align='center', font=("Arial", 30, "normal"))

b_w = 200; b_h = 60
rock_x = 300; rock_y = 200
scissors_x = 300; scissors_y = 300
paper_x = 300; paper_y = 100
q_x = 300; q_y = -200
c_x = -500; c_y = 300
p_x = -100; p_y = 300

t.speed(500)
win = t.Screen()
win.setup(1200, 800)

button(scissors_x, scissors_y, 'blue', 'Scissors')
button(rock_x, rock_y, 'blue', 'Rock')
button(paper_x, paper_y, 'blue', 'Paper')
button(q_x, q_y, 'black', 'QUIT')
button(c_x, c_y, 'red', 'Computer')
button(p_x, p_y, 'green', 'Player')

t.addshape('imgs/scissors.gif')
t.addshape('imgs/rock.gif')
t.addshape('imgs/paper.gif')
t.title("ORIEL")

com = t.Turtle('imgs/paper.gif')
player = t.Turtle('imgs/paper.gif')
com.pu(); player.pu()
com.goto(-400, 0); player.goto(0, 0)

# park the drawing turtle off-screen
t.pu()
t.goto(999, 999)

pw = 0; p = 2
cw = 0; c = 2

def game(c, p):
    # choices encoded as 0 = scissors, 1 = rock, 2 = paper
    global cw
    global pw
    if p == 0:
        if c == 0:
            com.shape('imgs/scissors.gif')
        elif c == 1:
            com.shape('imgs/rock.gif')
            cw += 1
        else:
            com.shape('imgs/paper.gif')
            pw += 1
    elif p == 1:
        if c == 0:
            com.shape('imgs/scissors.gif')
            pw += 1
        elif c == 1:
            com.shape('imgs/rock.gif')
        else:
            com.shape('imgs/paper.gif')
            cw += 1
    elif p == 2:
        if c == 0:
            com.shape('imgs/scissors.gif')
            cw += 1
        elif c == 1:
            com.shape('imgs/rock.gif')
            pw += 1
        else:
            com.shape('imgs/paper.gif')
    if pw == 2 or cw == 2:
        if pw == 2:
            print("I won the game. {}:{}".format(pw, cw))
        elif cw == 2:
            print("I lost the game. {}:{}".format(pw, cw))
        msg_box = tk.messagebox.askquestion('ORIEL', 'Do you want to continue?', icon='info')
        if msg_box == 'yes':
            pw = 0; cw = 0
        else:
            win.bye()

def check(x, y):
    # dispatch a screen click to the button it landed on
    if (x >= rock_x and x <= rock_x + b_w and y <= rock_y and y >= rock_y - b_h):
        p = 1
        player.shape('imgs/rock.gif')
        c = r.randint(0, 2)
        game(c, p)
    elif (x >= scissors_x and x <= scissors_x + b_w and y <= scissors_y and y >= scissors_y - b_h):
        p = 0
        player.shape('imgs/scissors.gif')
        c = r.randint(0, 2)
        game(c, p)
    elif (x >= paper_x and x <= paper_x + b_w and y <= paper_y and y >= paper_y - b_h):
        p = 2
        player.shape('imgs/paper.gif')
        c = r.randint(0, 2)
        game(c, p)
    elif (x >= q_x and x <= q_x + b_w and y <= q_y and y >= q_y - b_h):
        win.bye()
    else:
        t.goto(999, 999)

win.onclick(check)
t.done()
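The nested if/elif ladder in game() can be collapsed with modular arithmetic: with the same 0/1/2 encoding used above (scissors/rock/paper), (p - c) % 3 == 1 means the player wins and == 2 means the computer wins. A hedged standalone sketch of that alternative, not the code of the post itself:

def judge(p, c):
    # choices encoded 0 = scissors, 1 = rock, 2 = paper, as in the game above
    diff = (p - c) % 3
    if diff == 0:
        return "draw"
    return "player" if diff == 1 else "computer"

# quick checks against the rules: rock (1) beats scissors (0), scissors beat paper (2)
assert judge(1, 0) == "player"
assert judge(0, 2) == "player"
assert judge(2, 0) == "computer"
assert judge(1, 1) == "draw"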
John Doe · May 15, 2023, 8:23 p.m.
python game
A simple hangman game using Python turtle.

Result:

Code:

import random
import turtle

screen = turtle.Screen()
at = turtle.Turtle()

word_list = ["테스트", "oriel", "PHENOMENA"]  # "테스트" is Korean for "test"

def get_word():
    word = random.choice(word_list)
    return word.upper()

def play(word):
    at.reset()
    at.pencolor('black')
    at.pensize(10)
    at.pu()
    at.goto(-120, 20)
    at.pd()
    word_completion = "_" * len(word)
    guessed = False
    guessed_letters = []
    guessed_words = []
    tries = 6
    exec(display_hangman(tries))
    print(word_completion)
    print("\n")
    while not guessed and tries > 0:
        guess = screen.textinput("Input", "Please enter a single letter: ")
        if guess is None:
            break
        guess = guess.upper()
        if len(guess) == 1 and guess.isalpha():
            if guess in guessed_letters:
                print("You already used that letter. Please enter a different one.")
            elif guess not in word:
                print(guess, "is not in the word.")
                tries -= 1
                exec(display_hangman(tries))
                guessed_letters.append(guess)
            else:
                print("Well done!", guess, "is in the word!")
                guessed_letters.append(guess)
                word_as_list = list(word_completion)
                indices = [i for i, letter in enumerate(word) if letter == guess]
                for index in indices:
                    word_as_list[index] = guess
                word_completion = "".join(word_as_list)
                if "_" not in word_completion:
                    guessed = True
        elif len(guess) == len(word) and guess.isalpha():
            if guess in guessed_words:
                print("You already tried the word", guess)
            elif guess != word:
                print(guess, "is not the word.")
                tries -= 1
                exec(display_hangman(tries))
                guessed_words.append(guess)
            else:
                guessed = True
                word_completion = word
        else:
            print("That is not a valid guess.")
        exec(display_hangman(tries))
        print(word_completion)
        print("\n")
    if guessed:
        print("Congratulations! You won the game.")
    else:
        print("You ran out of tries. The word was " + word + ". Better luck next time!")

# board setup: '분필.gif' is a chalk sprite, '칠판.gif' is the blackboard background
turtle.title("PHENOMENA.COM")
turtle.addshape('분필.gif')
turtle.shape('분필.gif')
turtle.bgpic('칠판.gif')
turtle.pu()
turtle.goto(-120, 100)
turtle.pencolor('black')
turtle.pensize(10)
turtle.pd()
turtle.lt(90)
turtle.fd(50)
turtle.lt(90)
turtle.fd(100)
turtle.lt(90)
turtle.fd(350)
turtle.penup()
turtle.goto(-150, -205)
turtle.pd()
turtle.right(90)
turtle.fd(130)
turtle.pu()
turtle.goto(-120, 100)
turtle.pd()

def display_hangman(tries):
    # each stage is a snippet of turtle commands, executed with exec()
    stages = [
        # final state:
        '''
at.pu()
at.goto(-120,-100)
at.pd()
at.lt(90)
at.fd(75)
''',
        # head, torso, both arms, and one leg
        '''
at.pu()
at.goto(-120,-100)
at.pd()
at.rt(90)
at.fd(75)
''',
        # head, torso, and both arms
        '''
at.pu()
at.goto(-120,-20)
at.pd()
at.lt(90)
at.fd(70)
''',
        # head, torso, and one arm
        '''
at.pu()
at.goto(-120,-20)
at.pd()
at.rt(45)
at.fd(70)
''',
        # head and torso
        '''
at.pu()
at.goto(-120,-100)
at.pd()
at.lt(90)
at.fd(115)
''',
        # head
        '''
at.circle(40)
''',
        ''
    ]
    return stages[tries]

def main():
    word = get_word()
    play(word)
    while screen.textinput("Next Game", "Play Again? (Y/N) ") in ["y", "Y"]:
        word = get_word()
        play(word)

if __name__ == "__main__":
    main()
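display_hangman() returns a string of turtle commands that play() then runs with exec(). The same idea can be expressed without exec() by storing the stages as plain functions; a standalone sketch of that alternative (the stage drawings here are simplified placeholders, not the post's exact figures):

import turtle

at = turtle.Turtle()

def head():
    at.circle(40)

def torso():
    at.pu(); at.goto(-120, -100); at.pd(); at.lt(90); at.fd(115)

def nothing():
    pass

# indexed by remaining tries, like the string-based stages above
stages = [torso, torso, torso, head, head, head, nothing]

def display_hangman(tries):
    stages[tries]()   # a plain call instead of exec() on a code string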
John Doe · May 8, 2023, 9:51 p.m.
python hangman
The ECMAScript specification defines the rules for converting a value to a Boolean roughly as follows: if a value is undefined or null it converts to false; if it is an object it converts to true.

In the case of document.all, if you check its type with

console.log(typeof document.all);

it prints undefined, so document.all is falsy.

Yet if you simply look at the result of

console.log(document.all);

you get an HTMLAllCollection, i.e. an object. So why is it falsy?

The answer is here:
Document: all property - Web APIs | MDN
https://developer.mozilla.org/en-US/docs/Web/API/Document/all

There you will find the sentence "document.all is the only falsy object accessible to JavaScript, because it has the [[IsHTMLDDA]] internal slot." In other words, document.all is the only falsy object that JavaScript can access.

To begin with, document.all is a non-standard coding practice that was used in old browsers such as IE. The requirements drafted around the time of ECMAScript edition 5 say:

* The all attribute must return an HTMLAllCollection rooted at the document node, matching all HTML elements. The object returned for "all" has several unusual behaviors:
* User agents (browsers) must act as if the all object converts to false in JavaScript boolean operations.
* User agents must act as if the all object compares equal to undefined under the JavaScript == and != operators.
* User agents must act so that JavaScript's typeof operator returns the string 'undefined' when applied to the all object.

These requirements deliberately violated the JavaScript specification of the time (ECMAScript 5). That specification says the ToBoolean() operation must convert every object to a truthy value, and it has no provision for an object that behaves as if it were undefined for particular operators. The violation was driven by the need for compatibility with two classes of legacy content:

1. content that uses document.all as a way to detect legacy user agents, and
2. content that supports only those legacy user agents and uses the document.all object without first testing whether it exists.

Example code:

if (document.all) {
    // path for old browsers such as IE
} else if (document.getElementById) {
    // path for modern browsers
}

For a long time document.all was used this way to detect old browsers. But because document.all is checked first, a modern browser that provides both properties still ends up in the document.all branch. Modern browsers of course prefer document.getElementById, but most of them still expose document.all for backward compatibility, so if document.all were truthy the else branch would never run. The code therefore has to be rewritten like this:

if (document.getElementById) {
    // code that uses document.getElementById for "modern" browsers
} else if (document.all) {
    // code that uses document.all for old browsers such as IE
}

But a great deal of existing code is still written the other way around.

The simplest fix for this problem is to make document.all evaluate as "false" in modern browsers, while keeping it usable.

To summarize:
1. Even in modern browsers, document.all can still be used to access HTML element objects.
2. But telling old and new browsers apart with code like the above has limits, and every such snippet would have to be fixed by hand.
3. So document.all was made an object that evaluates to false in a check like if (document.all), while remaining usable as a collection for accessing element objects.

Ref.
Why is document.all falsy?
https://stackoverflow.com/questions/10350142/why-is-document-all-falsy
John Doe · May 4, 2023, 9:32 p.m.
html javascript
If a JetBrains IDE suddenly refuses to start and spews an error like the following:

Internal error. Please refer to http://jb.gg/ide/critical-startup-errors

java.util.concurrent.CompletionException: java.net.BindException: Address already in use: bind
    at java.base/java.util.concurrent.CompletableFuture.encodeThrowable(CompletableFuture.java:314)
    at java.base/java.util.concurrent.CompletableFuture.completeThrowable(CompletableFuture.java:319)
    at java.base/java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1702)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: java.net.BindException: Address already in use: bind
    at java.base/sun.nio.ch.Net.bind0(Native Method)
    at java.base/sun.nio.ch.Net.bind(Net.java:455)
    at java.base/sun.nio.ch.Net.bind(Net.java:447)
    at java.base/sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:227)
    at io.netty.channel.socket.nio.NioServerSocketChannel.doBind(NioServerSocketChannel.java:132)
    at io.netty.channel.AbstractChannel$AbstractUnsafe.bind(AbstractChannel.java:551)
    at io.netty.channel.DefaultChannelPipeline$HeadContext.bind(DefaultChannelPipeline.java:1346)
    at io.netty.channel.AbstractChannelHandlerContext.invokeBind(AbstractChannelHandlerContext.java:503)
    at io.netty.channel.AbstractChannelHandlerContext.bind(AbstractChannelHandlerContext.java:488)
    at io.netty.channel.DefaultChannelPipeline.bind(DefaultChannelPipeline.java:985)
    at io.netty.channel.AbstractChannel.bind(AbstractChannel.java:247)
    at io.netty.bootstrap.AbstractBootstrap$2.run(AbstractBootstrap.java:344)
    at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163)
    at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:510)
    at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:518)
    at io.netty.util.concurrent.SingleThreadEventExecutor$6.run(SingleThreadEventExecutor.java:1044)
    at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    ... 1 more

-----
JRE 11.0.5+10-b520.30 amd64 by JetBrains s.r.o
D:\Jetbrains\apps\CLion\ch-0\193.6015.37\jbr

try this:

1. Run cmd as administrator.

2. Disable Hyper-V (a few restarts are required):
dism.exe /Online /Disable-Feature:Microsoft-Hyper-V

3. Once all the required restarts are done, reserve the port you need so that Hyper-V cannot reserve it again:
netsh int ipv4 add excludedportrange protocol=tcp startport=50051 numberofports=1

4. Re-enable Hyper-V (this step also triggers a reboot):
dism.exe /Online /Enable-Feature:Microsoft-Hyper-V /All

As the fix suggests, the IDE would not start because Hyper-V had claimed the port the JetBrains IDE uses while Windows was booting. Simply disabling Hyper-V and restarting was enough to get IntelliJ running again, but since I still need Hyper-V I also ran the extra steps above.
https://jb.gg/ide/critical-startup-errors

When PyCharm will not start, try running pycharm.bat. It likewise prints an error message like the following, which reveals the exact cause.
java.util.concurrent.CompletionException: java.net.BindException: Address already in use: bind
    at java.base/java.util.concurrent.CompletableFuture.encodeThrowable(CompletableFuture.java:314)
    at java.base/java.util.concurrent.CompletableFuture.completeThrowable(CompletableFuture.java:319)
    at java.base/java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1702)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.util.concurrent.Executors$PrivilegedThreadFactory$1$1.run(Executors.java:668)
    at java.base/java.util.concurrent.Executors$PrivilegedThreadFactory$1$1.run(Executors.java:665)
    at java.base/java.security.AccessController.doPrivileged(Native Method)
    at java.base/java.util.concurrent.Executors$PrivilegedThreadFactory$1.run(Executors.java:665)
    at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: java.net.BindException: Address already in use: bind
    at java.base/sun.nio.ch.Net.bind0(Native Method)
    at java.base/sun.nio.ch.Net.bind(Net.java:455)
    at java.base/sun.nio.ch.Net.bind(Net.java:447)
    at java.base/sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:227)
    at io.netty.channel.socket.nio.NioServerSocketChannel.doBind(NioServerSocketChannel.java:134)
    at io.netty.channel.AbstractChannel$AbstractUnsafe.bind(AbstractChannel.java:550)
    at io.netty.channel.DefaultChannelPipeline$HeadContext.bind(DefaultChannelPipeline.java:1334)
    at io.netty.channel.AbstractChannelHandlerContext.invokeBind(AbstractChannelHandlerContext.java:506)
    at io.netty.channel.AbstractChannelHandlerContext.bind(AbstractChannelHandlerContext.java:491)
    at io.netty.channel.DefaultChannelPipeline.bind(DefaultChannelPipeline.java:973)
    at io.netty.channel.AbstractChannel.bind(AbstractChannel.java:248)
    at io.netty.bootstrap.AbstractBootstrap$2.run(AbstractBootstrap.java:356)
    at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164)
    at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472)
    at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:500)
    at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989)
    at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    ... 1 more

-----
JRE 11.0.9+11-b1145.21 amd64 by JetBrains s.r.o.
C:\Program Files\JetBrains\PyCharm Community Edition 2020.3\jbr

In that case I was able to fix it simply by running the following commands:

C:\WINDOWS\system32>net stop winnat
The Windows NAT Driver service was stopped successfully.

C:\WINDOWS\system32>net start winnat
The Windows NAT Driver service was started successfully.

https://intellij-support.jetbrains.com/hc/en-us/community/posts/360010020399-Pycharm-2020-3-Internal-Error-java-net-BindException-Address-already-in-use-bind-Windows-10
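The root cause in both traces above is a failed bind on a port the IDE expects to own. A quick way to confirm from Python whether a given local port is already taken (a hedged diagnostic sketch; the port number is just an example, matching the one excluded via netsh in step 3):

import socket

def port_is_free(port, host="127.0.0.1"):
    # try to bind the port ourselves; failure means something else holds it
    # or the OS has reserved it (e.g. WinError 10048 / EADDRINUSE)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind((host, port))
            return True
        except OSError:
            return False

print(port_is_free(50051))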
John Doe · April 4, 2023, 8:22 a.m.
JetBrains PyCharm
Create your virtual environment and install the packages in this order:

Django==3.0.8
djangorestframework==3.11.0
websocket-client==0.57.0
redis==3.5.3
asgiref==3.2.10
channels-redis==2.4.2
channels==3.0.1

Then create a Django project named ChatApp:

django-admin startproject ChatApp

After installing channels, add channels to your installed apps:

INSTALLED_APPS = [
    'chat.apps.ChatConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # add django channels
    'channels',
]

Set the ASGI application to your default ASGI file in the project:

ASGI_APPLICATION = 'ChatApp.asgi.application'

Create a new app that will hold all the chat functionality:

python manage.py startapp chat

And add your app to the installed apps in settings.py.

Add chat/urls.py:

from django.urls import path, include
from chat import views as chat_views

urlpatterns = [
    path("chat", chat_views.chatPage, name="chat-page"),
]

Add chat/routing.py:

from django.urls import re_path
from chat.consumers import ChatConsumer

# Here, "ws" is routed to ChatConsumer, which
# will handle the chat functionality.
websocket_urlpatterns = [
    re_path(r'ws$', ChatConsumer.as_asgi()),
]

Add chat/consumers.py:

import json
from channels.generic.websocket import AsyncWebsocketConsumer

class ChatConsumer(AsyncWebsocketConsumer):
    async def connect(self):
        self.roomGroupName = "group_chat_gfg"
        await self.channel_layer.group_add(
            self.roomGroupName,
            self.channel_name
        )
        await self.accept()

    async def disconnect(self, close_code):
        await self.channel_layer.group_discard(
            self.roomGroupName,
            self.channel_name
        )

    async def receive(self, text_data):
        text_data_json = json.loads(text_data)
        message = text_data_json["message"]
        username = text_data_json["username"]
        await self.channel_layer.group_send(
            self.roomGroupName,
            {
                "type": "sendMessage",
                "message": message,
                "username": username,
            })

    async def sendMessage(self, event):
        message = event["message"]
        username = event["username"]
        await self.send(text_data=json.dumps({"message": message, "username": username}))

And add ChatApp/asgi.py (a minimal sketch of this file follows below).
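The post references ChatApp/asgi.py without showing it, so here is a minimal sketch of what a Channels 3 ASGI entry point for this layout typically looks like (an assumption based on the chat/routing.py above, not the author's exact file). Note that get_asgi_application() is called before anything from channels or chat is imported, so Django's app registry is initialized first; importing auth-related modules too early is exactly what the AppRegistryNotReady traceback below complains about.

# ChatApp/asgi.py (hedged sketch, assuming the chat/routing.py shown above)
import os

from django.core.asgi import get_asgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ChatApp.settings")

# Initialize Django before importing anything that touches models/auth,
# otherwise "Apps aren't loaded yet." is raised (see the traceback below).
django_asgi_app = get_asgi_application()

from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter

import chat.routing

application = ProtocolTypeRouter({
    "http": django_asgi_app,
    "websocket": AuthMiddlewareStack(
        URLRouter(chat.routing.websocket_urlpatterns)
    ),
})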
* Has anyone had a problem like this?

Traceback (most recent call last):
  File "/path/to/my/env/bin/daphne", line 11, in <module>
    sys.exit(CommandLineInterface.entrypoint())
  File "/path/to/my/env/lib/python3.6/site-packages/daphne/cli.py", line 161, in entrypoint
    cls().run(sys.argv[1:])
  File "/path/to/my/env/lib/python3.6/site-packages/daphne/cli.py", line 222, in run
    application = import_by_path(args.application)
  File "/path/to/my/env/lib/python3.6/site-packages/daphne/utils.py", line 12, in import_by_path
    target = importlib.import_module(module_path)
  File "/path/to/my/env/lib/python3.6/importlib/__init__.py", line 126, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "<frozen importlib._bootstrap>", line 994, in _gcd_import
  File "<frozen importlib._bootstrap>", line 971, in _find_and_load
  File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 678, in exec_module
  File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
  File "./my_project/asgi.py", line 5, in <module>
    application = get_default_application()
  File "/path/to/my/env/lib/python3.6/site-packages/channels/routing.py", line 33, in get_default_application
    module = importlib.import_module(path)
  File "/path/to/my/env/lib/python3.6/importlib/__init__.py", line 126, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "<frozen importlib._bootstrap>", line 994, in _gcd_import
  File "<frozen importlib._bootstrap>", line 971, in _find_and_load
  File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 678, in exec_module
  File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
  File "./my_project/routing.py", line 4, in <module>
    from channels.auth import AuthMiddlewareStack
  File "/path/to/my/env/lib/python3.6/site-packages/channels/auth.py", line 12, in <module>
    from django.contrib.auth.models import AnonymousUser
  File "/path/to/my/env/lib/python3.6/site-packages/django/contrib/auth/models.py", line 2, in <module>
    from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
  File "/path/to/my/env/lib/python3.6/site-packages/django/contrib/auth/base_user.py", line 47, in <module>
    class AbstractBaseUser(models.Model):
  File "/path/to/my/env/lib/python3.6/site-packages/django/db/models/base.py", line 100, in __new__
    app_config = apps.get_containing_app_config(module)
  File "/path/to/my/env/lib/python3.6/site-packages/django/apps/registry.py", line 244, in get_containing_app_config
    self.check_apps_ready()
  File "/path/to/my/env/lib/python3.6/site-packages/django/apps/registry.py", line 127, in check_apps_ready
    raise AppRegistryNotReady("Apps aren't loaded yet.")
django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet.

Then visit this page:
Django apps aren't loaded yet when using asgi
https://stackoverflow.com/questions/53683806/django-apps-arent-loaded-yet-when-using-asgi

ChatApp/settings.py:

CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "channels.layers.InMemoryChannelLayer"
    }
}

Using Redis:

CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            "hosts": [('127.0.0.1', 6379)]
        },
    },
}

* Deploying Django Channels: how to keep Daphne running after exiting shell on web server
https://stackoverflow.com/questions/50192967/deploying-django-channels-how-to-keep-daphne-running-after-exiting-shell-on-web

Nginx

1. Windows

location @django {
    proxy_pass http://127.0.0.1:1234;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_read_timeout 600s;
    # this next line adds the Host header so that apache knows which vHost to serve
    # the $host variable is automatically set to the hostname Nginx is responding to
    proxy_set_header Host $host;
    # Websocket support
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
}

2. Linux with Daphne

Make service-name.service:

[Unit]
Description=Indifference Daphne Service
After=network.target

[Service]
Type=simple
User=indifference
WorkingDirectory=/home/indifference/path/to/indifference
ExecStart=/home/indifference/path/to/bin/daphne -p 3333 indifference.asgi:application access-log=/data/logs/indifference/daphne/access.log

[Install]
WantedBy=multi-user.target

chmod 755 service-name.service
systemctl daemon-reload
systemctl enable service-name.service
systemctl start service-name.service

Update nginx.conf:

upstream channels-indifference-backend {
    server localhost:3333;
}
...
location /ws {
    proxy_pass http://channels-indifference-backend;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_redirect off;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $server_name;
}

Still not working? Just restart it:

systemctl stop service-name.service
systemctl start service-name.service
systemctl status service-name.service

Windows Django - dev

Create a folder called config:

config/
    commonsettings.py
    dev.py
    prod.py

Make sure that in dev.py and prod.py you import everything from commonsettings.py like this:

from .commonsettings import *

dev.py sample:

INSTALLED_APPS = [
    ...
    # 'channels',
    ...
]

ASGI_APPLICATION = None

Then, if you want to run with the dev.py settings:

python manage.py runserver --settings=config.dev

To run your ASGI application, simply point Daphne at it and optionally set a bind address and port (defaults to localhost, port 8000):

daphne -b 0.0.0.0 -p 9001 myproject.asgi:application

The Nginx WS config is the same as in "2. Linux with Daphne".

* When deploying Django with ASGI, StreamingHttpResponse could not be accessed. Async support for StreamingHttpResponse was only added in Django 4.2.

You can check the program here: https://www.phenomena.com/chat
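Since websocket-client is already in the package list above, a short script is enough to smoke-test the /ws endpoint from the command line. A hedged sketch: the host/port and the message shape follow the ChatConsumer above, and the local URL is an assumption about your setup.

# test_ws.py (minimal smoke test for the ChatConsumer above; hypothetical local URL)
import json

from websocket import create_connection  # provided by websocket-client

ws = create_connection("ws://127.0.0.1:8000/ws")
ws.send(json.dumps({"message": "hello", "username": "tester"}))
print(ws.recv())   # the group echo sent back by sendMessage()
ws.close()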
Ref.
Realtime chat app using Django - GeeksforGeeks
https://www.geeksforgeeks.org/realtime-chat-app-using-django/
Django Channel Custom Authentication Middleware __call__() missing 2 required positional arguments: 'receive' and 'send'
https://stackoverflow.com/questions/64625473/django-channel-custom-authentication-middleware-call-missing-2-required-po
Configuring ASGI Django Application using Daphne and Nginx Server
https://ritiktaneja.medium.com/configuring-asgi-django-application-using-daphne-and-nginx-server-59a90456fe17
Channels cannot be used with StreamingHttpResponse
https://forum.djangoproject.com/t/channels-cannot-be-used-with-streaminghttpresponse/10105/4
John Doe · March 2, 2023, 6:55 a.m.
django daphne nginx redis
<Without docker-compose.yml>

1. Install Docker.

2. Elasticsearch 7.16.2

2-1. Pull Elasticsearch
# docker pull docker.elastic.co/elasticsearch/elasticsearch:7.16.2

2-2. Run Elasticsearch
# docker run -d -p 9200:9200 -p 9300:9300 --name es_x -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e ES_JAVA_OPTS="-Xms200m -Xmx200m" docker.elastic.co/elasticsearch/elasticsearch:7.16.2
8fd1161fed14b8e048e0f929a37cfacbab56371dc4c2b134602469c321cc0421

2-3. Check that it is running
# docker ps
CONTAINER ID   IMAGE                                                   COMMAND                  CREATED          STATUS          PORTS                                            NAMES
8fd1161fed14   docker.elastic.co/elasticsearch/elasticsearch:7.16.2   "/bin/tini -- /usr/l…"   45 seconds ago   Up 43 seconds   0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp   es_x

2-4. Check your Elasticsearch settings
# docker exec -i -t es_x cat /usr/share/elasticsearch/config/elasticsearch.yml
cluster.name: "docker-cluster"
network.host: 0.0.0.0

2-5. Stop
# docker stop es_x
es_x

<With docker-compose.yml>

1. Follow the previous section up to and including item 2-1.

2. docker-compose.yml

version: '3.6'
services:
  elasticsearch:
    image: 'docker.elastic.co/elasticsearch/elasticsearch:7.16.2'
    container_name: es_a
    ports:
      - 9200:9200
    environment:
      - node.name="es_a"
      - cluster.name="es-docker-cluster"
      - bootstrap.memory_lock=true
      - discovery.type=single-node
      #- "ES_JAVA_OPTS=-Xms200m -Xmx200m"
    volumes:
      - ./esdata:/usr/share/elasticsearch/data
      - ./eslog:/usr/share/elasticsearch/log
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      - elastic
    deploy:
      resources:
        limits:
          memory: 1GB
  elasticsearch2:
    image: 'docker.elastic.co/elasticsearch/elasticsearch:7.16.2'
    container_name: es_b
    ports:
      - 9201:9200
    environment:
      - node.name="es_b"
      - cluster.name="es-docker-cluster2"
      - bootstrap.memory_lock=true
      - discovery.type=single-node
      #- "ES_JAVA_OPTS=-Xms200m -Xmx200m"
    volumes:
      - ./esdata2:/usr/share/elasticsearch/data
      - ./eslog2:/usr/share/elasticsearch/log
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      - elastic2
    deploy:
      resources:
        limits:
          memory: 1GB

volumes:
  esdata:
    driver: local
    driver_opts:
      o: bind
      type: none
      #device: /data
  esdata2:
    driver: local
    driver_opts:
      o: bind
      type: none
      #device: /data2
  eslog:
    driver: local
  eslog2:
    driver: local

networks:
  elastic:
    driver: bridge
  elastic2:
    driver: bridge

# docker-compose -f docker-compose.yml up -d
# docker-compose -f docker-compose.yml stop
# docker-compose -f docker-compose.yml rm

3. Result

# docker-compose -f docker-compose.yml up -d
[+] Running 2/2
 - Container es_a  Started  2.1s
 - Container es_b  Started

# docker-compose -f docker-compose.yml stop
[+] Running 2/2
 - Container es_a  Stopped  1.2s
 - Container es_b  Stopped

3-1. 9200

{
  "name" : "\"es_a\"",
  "cluster_name" : "\"es-docker-cluster\"",
  "cluster_uuid" : "KwFTIgGCT7e8ZDNJIau6Jw",
  "version" : {
    "number" : "7.16.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "2b937c44140b6559905130a8650c64dbd0879cfb",
    "build_date" : "2021-12-18T19:42:46.604893745Z",
    "build_snapshot" : false,
    "lucene_version" : "8.10.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

3-2. 9201
{
  "name" : "\"es_b\"",
  "cluster_name" : "\"es-docker-cluster2\"",
  "cluster_uuid" : "MZhKEnPuTdu5iNoatWOnQA",
  "version" : {
    "number" : "7.16.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "2b937c44140b6559905130a8650c64dbd0879cfb",
    "build_date" : "2021-12-18T19:42:46.604893745Z",
    "build_snapshot" : false,
    "lucene_version" : "8.10.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

* If the error 'Custom Analyzer [nori_korean] failed to find tokenizer under name [nori_tokenizer]' is raised, install the Korean (nori) analysis plugin:
# sudo bin/elasticsearch-plugin install analysis-nori
The plugin must be installed on every node in the cluster, and each node must be restarted after installation.

* Cannot bind to some ports due to permission denied? Run as administrator:
net stop winnat
net start winnat

Ref.
Docker bind elasticsearch volume in app folder
https://stackoverflow.com/questions/52373356/docker-bind-elasticsearch-volume-in-app-folder
Docker, running multiple container (elasticsearch)
https://stackoverflow.com/questions/59500443/docker-running-multiple-container-elasticsearch
Enable Hyper-V on Windows 10
https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v
ElasticSearch on docker - 2nd instance kills the first instance
https://stackoverflow.com/questions/47540547/elasticsearch-on-docker-2nd-instance-kills-the-first-instance
ElasticSearch - cannot run two es docker containers at the same time
https://stackoverflow.com/questions/72273655/elasticsearch-cannot-run-two-es-docker-containers-at-the-same-time
Cannot bind to some ports due to permission denied
https://stackoverflow.com/questions/48478869/cannot-bind-to-some-ports-due-to-permission-denied
[Windows 10] A complete guide to installing Docker Desktop (including Home edition)
https://www.lainyzine.com/ko/article/a-complete-guide-to-how-to-install-docker-desktop-on-windows-10/
Installing WSL2 and Ubuntu on Windows 10
https://gaesae.com/161
Installing and running Elasticsearch on Docker
https://jinhokwon.github.io/devops/elasticsearch/elasticsearch-docker/
Korean (nori) Analysis Plugin | Elasticsearch Plugins and Integrations [6.4] | Elastic
https://www.elastic.co/guide/en/elasticsearch/plugins/6.4/analysis-nori.html#analysis-nori-install
John Doe · Feb. 7, 2023, 9:33 a.m.
elasticsearch docker
Yes, and I did that recently on my laptop to experiment with an ES cluster. This setup keeps the environment simple, instead of using Docker or multiple machines/VMs/EC2 instances. You can download the .tar.gz file recommended above and then do one of the following:

Option 1: untar the file into two directories, es-01 and es-02, then update the config file in each node.

Option 2: untar the file into one directory (es-instance), copy the config directory to other locations such as path/to/es-01/config and path/to/es-02/config, then create a script that launches Elasticsearch from the same binary location (es-instance) but with a different config location (one for es-01/config, one for es-02/config).

You need to change the following parameters in the config/elasticsearch.yml file to separate one node from the other:

cluster.name
node.name
path.data
path.logs
http.port

Can I run multiple Elasticsearch instances in one server?
https://discuss.elastic.co/t/can-i-run-multiple-elasticsearch-instances-in-one-server/321230
how to run multiple instances of elasticsearch on one host
https://stackoverflow.com/questions/28482449/how-to-run-multiple-instances-of-elasticsearch-on-one-host

You need to prepare two elasticsearch.yml config files with the settings adjusted accordingly, and point each node at its own file on startup:

/elasticsearch/config/elasticsearch.yml
/elasticsearch/config2/elasticsearch.yml

/elasticsearch/config2/elasticsearch.yml:

cluster.name: trinitarian
node.name: node-2
path.data: /path/to/data
path.logs: path/to/logs
network.host: 127.0.0.1
network.bind_host: 127.0.0.1
network.publish_host: 127.0.0.1
http.port: 9500

Linux

Assuming your rpm or deb created an init.d script, start a second node on the same machine as follows:

cd /etc/init.d
cp --preserve elasticsearch elasticsearch2

Edit the elasticsearch2 script:
- change # elasticsearch to # elasticsearch2
- add node="2" after the line prog="elasticsearch"
- change pidfile=/var/run/elasticsearch/${prog}.pid to pidfile=/var/run/elasticsearch/${prog}${node}.pid
- change lockfile=/var/lock/subsys/$prog to lockfile=/var/lock/subsys/$prog$node
- change echo -n $"Starting $prog: " to echo -n $"Starting $prog: (node $node)"
- change echo -n $"Stopping $prog: " to echo -n $"Stopping $prog: (node $node)"

Save the file, then execute:

chkconfig --add elasticsearch2
service elasticsearch2 start

I also had to add -Des.config=<path-to-second-config-files> to get it working.

Run multiple elasticsearch nodes as a service on one Ubuntu-Server
https://stackoverflow.com/questions/26162690/run-multiple-elasticsearch-nodes-as-a-service-on-one-ubuntu-server

Windows

/elasticsearch/bin/elasticsearch-env2.bat:

if not defined ES_PATH_CONF (
  set ES_PATH_CONF=!ES_HOME!\config2
)

/elasticsearch/bin/elasticsearch2.bat:

CALL "%~dp0elasticsearch-env2.bat" || exit /b 1

Check this:

http://localhost:9200/
{
  "name" : "BEAST",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "EVBl4ttGSWKP8MJJMivMCg",
  "version" : {
    "number" : "7.16.2",
    "build_flavor" : "default",
    "build_type" : "zip",
    "build_hash" : "2b937c44140b6559905130a8650c64dbd0879cfb",
    "build_date" : "2021-12-18T19:42:46.604893745Z",
    "build_snapshot" : false,
    "lucene_version" : "8.10.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

http://localhost:9500/
{
  "name" : "node-2",
  "cluster_name" : "trinitarian",
  "cluster_uuid" : "QRUfSRYISH-20jU7x9yxog",
  "version" : {
    "number" : "7.16.2",
    "build_flavor" : "default",
    "build_type" : "zip",
    "build_hash" : "2b937c44140b6559905130a8650c64dbd0879cfb",
    "build_date" : "2021-12-18T19:42:46.604893745Z",
    "build_snapshot" : false,
    "lucene_version" : "8.10.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

Rebuild the index:

PS E:\pythonProjects\trinitarian\test> python manage.py search_index --rebuild --model xxx
Are you sure you want to delete the 'board' indexes? [y/N]: y
Deleting index 'board'
Creating index 'board'
Indexing 26 'Board' objects
PS E:\pythonProjects\trinitarian\test>

Test with this code (test.py):

import elasticsearch

elastic = elasticsearch.Elasticsearch(hosts=["localhost:9200"])
indices = elastic.indices.get_alias().keys()
print(indices)

elastic2 = elasticsearch.Elasticsearch(hosts=["localhost:9500"])
indices2 = elastic2.indices.get_alias().keys()
print(indices2)

Result:

E:\pythonProjects\trinitarian\venv\Scripts\python.exe E:/pythonProjects/trinitarian/test/app/test.py
dict_keys(['a', 'b', 'c', 'board'])
dict_keys(['board'])

Process finished with exit code 0
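Beyond listing indices, the two nodes can also be compared through their cluster health. A hedged sketch using the same connection style as test.py above (it assumes elasticsearch-py 7.x, where bare host:port strings are accepted):

import elasticsearch

# same connection style as test.py above (elasticsearch-py 7.x)
for host in ("localhost:9200", "localhost:9500"):
    es = elasticsearch.Elasticsearch(hosts=[host])
    health = es.cluster.health()
    print(host, health["cluster_name"], health["status"], health["number_of_nodes"])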
John Doe · Feb. 6, 2023, 4:18 a.m.
elasticsearch