February 12, 2020

This challenge was a TCP based image sending system. The server, upon a new connection, would send a Base64 encoded image and then ask the user which quadrant an image of Dora was in (1-4 counterclockwise starting in the upper-right corner). The image would look something like this:

Multiple images of Dora were used (but only one appeared in each image sent to the client), and they could be in grayscale or color. In addition, parts of the Dora images were transparent, which allowed the background to bleed through.

The key to solving this challenge was realizing that while Dora's image changes, the images of Dora's friends didn't. This means that you can use "template matching" to find exact copies of Dora's friends, remove them from the image, and then take the average location of all the remaining pixels. Once you have the average location, you can find the quadrant based on the image dimensions (720x720).

Here's what the final (admittedly rough) python script looked like:

from pwn import *
import os
import pyautogui
import cv2
import numpy as np
from os import listdir
from os.path import isfile, join
from matplotlib import pyplot

import base64

# Template metadata for each of Dora's friends. For each friend:
#   "h"/"w"            - height/width of the character inside the sprite
#   "pl"/"pr"/"pt"/"pb" - padding (left/right/top/bottom) around the character
#   "img"              - grayscale template image for cv2.matchTemplate
# NOTE(review): the pasted source lost the dict braces and commas; this is
# reconstructed as a list of dicts, which is how the matching loop below
# consumes it (`for i in doras`, `i['img']`, `i['pl']`, ...).
doras = [
    {
        "h": 15,
        "w": 10,
        "pl": 53,  # Padding left
        "pr": 14,  # Padding right
        "pt": 15,  # Padding top
        "pb": 19,  # Padding bottom
        "img": cv2.imread("swiper.png", 0),  # The actual template image
    },
    {
        "h": 32,
        "w": 45,
        "pl": 10,
        "pr": 9,
        "pt": 27,
        "pb": 19,
        "img": cv2.imread("backpack.png", 0),
    },
    {
        "h": 46,
        "w": 12,
        "pl": 31,
        "pr": 29,
        "pt": 19,
        "pb": 12,
        "img": cv2.imread("boots.png", 0),
    },
    {
        "h": 41,
        "w": 19,
        "pl": 26,
        "pr": 19,
        "pt": 15,
        "pb": 21,
        "img": cv2.imread("cow.png", 0),
    },
    {
        "h": 45,
        "w": 15,
        "pl": 17,
        "pr": 36,
        "pt": 14,
        "pb": 18,
        "img": cv2.imread("dino.png", 0),
    },
]

# Main solver loop: connect, receive Base64-encoded images, locate Dora,
# and answer with her quadrant until the server stops sending images.
while True:
    r = remote("", "8000")  # NOTE(review): host was redacted in the writeup.
    c = 0  # Count of images processed on this connection.
    while True:
        c += 1
        print(r.recvline())  # There's a blank line before each image.
        string = r.recvline()
        if(not string.startswith("No flag")):
            string = base64.b64decode(str(string))
            with open("output.png", "wb") as file:
                file.write(string)  # Save the received image.

            # Do openCV stuff: reload the image in grayscale and grab the
            # background colour from the top-left pixel.
            img_g = cv2.imread('output.png', 0)
            bg = img_g[0][0]

            for i in doras:  # Iterate through Dora's friends.
                # Match the friend's template; with TM_SQDIFF_NORMED the
                # best match is the *minimum* of the response map.
                res = cv2.matchTemplate(img_g, i['img'], cv2.TM_SQDIFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

                # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum.
                # Get the corners of the sprite (with a bit of extra padding).
                top_left = (min_loc[0] - i['pl'] - 5, min_loc[1] - i['pt'] - 5)
                bottom_right = (
                    int(min_loc[0] + i['w'] + i['pr'] + 5),
                    int(min_loc[1] + i['h'] + i['pb'] + 5))
                # Erase the friend by filling the area with the background.
                img_g = cv2.rectangle(
                    img_g, top_left, bottom_right, int(img_g[0][0]), -1)
            match_found = False

            # Whatever survives the erasing and isn't background should be
            # Dora; collect every non-background pixel coordinate.
            points = []
            for y in range(len(img_g)):
                for x in range(len(img_g[y])):
                    if(img_g[y][x] != bg):
                        points.append((y, x))  # Get every non-background point.

            center = list(np.mean(np.array(points), 0))  # Get the mean point.
            x = int(center[1])
            y = int(center[0])

            # NOTE(review): this line was garbled in the paste; it draws a
            # debug marker where Dora was found.
            img_g = cv2.circle(img_g, (x, y), 5, 0, -1)

            match_found = True
            # Map the mean point to a quadrant. The image is 720x720, so the
            # axes cross at 360; quadrants are numbered 1-4 counterclockwise
            # starting in the upper-right (image y grows downward).
            # NOTE(review): the quadrant branches were truncated in the
            # paste; reconstructed from the challenge description above.
            if(x <= 360):
                quad = 2 if y <= 360 else 3
            else:
                quad = 1 if y <= 360 else 4
            r.sendline(str(quad))
        else:
            # The server stopped sending images: either we got the flag or a
            # guess was wrong. Print the line and keep a copy of the last
            # image for post-mortem debugging.
            # NOTE(review): this branch was truncated in the paste;
            # reconstructed to match the original comment's intent.
            print(string)
            with open("failedfile.png", "wb") as file:  # If it fails, find why.
                with open("output.png", "rb") as last:
                    file.write(last.read())
            break

The 801st output image was the following: