Added an alternative algorithm

This commit is contained in:
l3gacy.b3ta 2021-02-16 18:30:58 +00:00
parent 65ffdc156f
commit 6cba333cb4
1 changed file with 169 additions and 0 deletions


@@ -0,0 +1,169 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from PIL import Image\n",
"import random\n",
"import re\n",
"from collections import defaultdict\n",
"\n",
"#Converts image to \"text\"\n",
"def img_to_text(img_val):\n",
" words = []\n",
" for i in img_val:\n",
" # r g b\n",
" outword = str(i[0]) + ',' + str(i[1]) + ',' + str(i[2])\n",
" words.append(outword)\n",
" return words\n",
"\n",
"#init corpus\n",
"corpus = []\n",
"#load first image\n",
"im = Image.open(\"bauhaus.jpg\", \"r\")\n",
"#Get img text\n",
"pix_val = list(im.getdata())\n",
"img_text = img_to_text(pix_val)\n",
"corpus = corpus + img_text\n",
"\n",
"#load first image\n",
"im = Image.open(\"bauhaus2.jpg\", \"r\")\n",
"#Get img text\n",
"pix_val = list(im.getdata())\n",
"img_text = img_to_text(pix_val)\n",
"corpus = corpus + img_text\n",
"\n",
"#load first image\n",
"im = Image.open(\"bauhaus3.jpg\", \"r\")\n",
"#Get img text\n",
"pix_val = list(im.getdata())\n",
"img_text = img_to_text(pix_val)\n",
"corpus = corpus + img_text\n",
"\n",
"\n",
"#markov model\n",
"markov_graph = defaultdict(lambda: defaultdict(int))\n",
"tokenized_text = corpus\n",
"last_word = tokenized_text[0].lower()\n",
"for word in tokenized_text[1:]:\n",
" word = word.lower()\n",
" markov_graph[last_word][word] += 1\n",
" last_word = word\n",
"\n",
"# Preview graph.\n",
"limit = 3\n",
"for first_word in ('the', 'by', 'who'):\n",
" next_words = list(markov_graph[first_word].keys())[:limit]\n",
" for next_word in next_words:\n",
" print(first_word, next_word)\n",
"\n",
"def walk_graph(graph, distance=5, start_node=None):\n",
" \"\"\"Returns a list of words from a randomly weighted walk.\"\"\"\n",
" if distance <= 0:\n",
" return []\n",
" \n",
" # If not given, pick a start node at random.\n",
" if not start_node:\n",
" start_node = random.choice(list(graph.keys()))\n",
" \n",
" \n",
" weights = np.array(\n",
" list(markov_graph[start_node].values()),\n",
" dtype=np.float64)\n",
" # Normalize word counts to sum to 1.\n",
" weights /= weights.sum()\n",
"\n",
" # Pick a destination using weighted distribution.\n",
" choices = list(markov_graph[start_node].keys())\n",
" chosen_word = np.random.choice(choices, None, p=weights)\n",
" \n",
" return [chosen_word] + walk_graph(\n",
" graph, distance=distance-1,\n",
" start_node=chosen_word)\n",
" \n",
"chain = []\n",
"for i in range(10000):\n",
" chain = chain + walk_graph(markov_graph, distance=12)\n",
"print(chain)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def chain_to_rgbbytes(ctext):\n",
" out_list = []\n",
" #for each generated pixel\n",
" for i in ctext:\n",
" #split out values\n",
" i = i.split(\",\")\n",
" #for each value in a pixel, add the int to the list\n",
" for x in i:\n",
" out_list.append(int(x))\n",
" #return and convert to bytes:\n",
" return out_list"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "not enough image data",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-12-d1de3a1e6f5d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[0mcolors\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mbytes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mchain_to_rgbbytes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mchain\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[0mimg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mImage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrombytes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'RGB'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;36m1000\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m1000\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolors\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 5\u001b[0m \u001b[0mimg\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\anaconda3\\lib\\site-packages\\PIL\\Image.py\u001b[0m in \u001b[0;36mfrombytes\u001b[1;34m(mode, size, data, decoder_name, *args)\u001b[0m\n\u001b[0;32m 2656\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2657\u001b[0m \u001b[0mim\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnew\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msize\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2658\u001b[1;33m \u001b[0mim\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrombytes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdecoder_name\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2659\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mim\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2660\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\anaconda3\\lib\\site-packages\\PIL\\Image.py\u001b[0m in \u001b[0;36mfrombytes\u001b[1;34m(self, data, decoder_name, *args)\u001b[0m\n\u001b[0;32m 795\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 796\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0ms\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m>=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 797\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"not enough image data\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 798\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0ms\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m!=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 799\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"cannot decode image data\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mValueError\u001b[0m: not enough image data"
]
}
],
"source": [
"#generate images!\n",
"\n",
"colors = bytes(chain_to_rgbbytes(chain))\n",
"img = Image.frombytes('RGB', (100, 100), colors)\n",
"img.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}