My name is Hoe Jiun Tian (何俊添). I am currently a third-year PhD student at Nanyang Technological University, Singapore, under the supervision of Prof. Tan Yap Peng and Prof. Jiang Xudong. Previously, I did research in computer vision and deep learning under the supervision of Prof. Chan Chee Seng during my undergraduate studies at the University of Malaya.
I am currently interested in image generation, diffusion models, and human-object interactions.
CVPR
InteractDiffusion: Interaction Control in Text-to-Image Diffusion Models
Jiun Tian Hoe, Xudong Jiang, Chee Seng Chan, and 2 more authors
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024
@inproceedings{interactdiffusion2024,
  title     = {InteractDiffusion: Interaction Control in Text-to-Image Diffusion Models},
  author    = {Hoe, Jiun Tian and Jiang, Xudong and Chan, Chee Seng and Tan, Yap-Peng and Hu, Weipeng},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  year      = {2024},
  abs       = {Large-scale text-to-image (T2I) diffusion models have showcased incredible capabilities in generating coherent images based on textual descriptions, enabling vast applications in content generation. While recent advancements have introduced control over factors such as object localization, posture, and image contours, a crucial gap remains in our ability to control the interactions between objects in the generated content. Well-controlling interactions in generated images could yield meaningful applications, such as creating realistic scenes with interacting characters. In this work, we study the problems of conditioning T2I diffusion models with Human-Object Interaction (HOI) information, consisting of a triplet label (person, action, object) and corresponding bounding boxes. We propose a pluggable interaction control model, called InteractDiffusion, that extends existing pre-trained T2I diffusion models to enable them to be better conditioned on interactions. Specifically, we tokenize the HOI information and learn their relationships via interaction embeddings. A conditioning self-attention layer is trained to map HOI tokens to visual tokens, thereby conditioning the visual tokens better in existing T2I diffusion models. Our model attains the ability to control the interaction and location on existing T2I diffusion models, which outperforms existing baselines by a large margin in HOI detection score, as well as fidelity in FID and KID. Project page: https://jiuntian.github.io/interactdiffusion.},
  url       = {https://arxiv.org/pdf/2312.05849.pdf},
}
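As a rough illustration of the idea in the abstract above, the sketch below shows how HOI triplets and their bounding boxes could be fused into interaction tokens and injected into the visual tokens through an extra gated self-attention layer. This is a simplified, hypothetical sketch (the class names, shapes, zero-initialized gate, and the token fusion are illustrative assumptions), not the actual InteractDiffusion implementation.

```python
# Hypothetical sketch of HOI conditioning via an extra gated self-attention layer.
# All names and shapes are assumptions for illustration, not the paper's code.
import torch
import torch.nn as nn


class HOIConditioningLayer(nn.Module):
    def __init__(self, dim, n_heads=8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, n_heads, batch_first=True)
        self.norm = nn.LayerNorm(dim)
        # Zero-initialized gate: the layer starts as an identity mapping and
        # gradually learns how much HOI information to mix into the visual tokens.
        self.gate = nn.Parameter(torch.zeros(1))

    def forward(self, visual_tokens, hoi_tokens):
        # Self-attention over the concatenation of visual and HOI tokens,
        # keeping only the visual positions, followed by a gated residual update.
        x = torch.cat([visual_tokens, hoi_tokens], dim=1)
        h = self.norm(x)
        out, _ = self.attn(h, h, h)
        return visual_tokens + torch.tanh(self.gate) * out[:, : visual_tokens.size(1)]


def make_hoi_tokens(subj_emb, action_emb, obj_emb, subj_box, obj_box, proj):
    # Hypothetical tokenizer: fuse the text embeddings of (person, action, object)
    # with the two bounding boxes into one interaction token per triplet.
    # proj is a linear layer mapping the concatenated features to the token dim.
    return proj(torch.cat([subj_emb, action_emb, obj_emb, subj_box, obj_box], dim=-1))
```

Because the gate starts at zero, such a layer can be plugged into a pre-trained T2I diffusion model without disturbing its behavior at the beginning of training.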
NeurIPS
One Loss for All: Deep Hashing with a Single Cosine Similarity based Learning Objective
Jiun Tian Hoe, Kam Woh Ng, Tianyu Zhang, and 3 more authors
In Advances in Neural Information Processing Systems (NeurIPS), 2021
@inproceedings{orthohash2021,
  title     = {One Loss for All: Deep Hashing with a Single Cosine Similarity based Learning Objective},
  author    = {Hoe, Jiun Tian and Ng, Kam Woh and Zhang, Tianyu and Chan, Chee Seng and Song, Yi-Zhe and Xiang, Tao},
  booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
  year      = {2021},
  abs       = {A deep hashing model typically has two main learning objectives: to make the learned binary hash codes discriminative and to minimize a quantization error. With further constraints such as bit balance and code orthogonality, it is not uncommon for existing models to employ a large number (>4) of losses. This leads to difficulties in model training and subsequently impedes their effectiveness. In this work, we propose a novel deep hashing model with only a single learning objective. Specifically, we show that maximizing the cosine similarity between the continuous codes and their corresponding binary orthogonal codes can ensure both hash code discriminativeness and quantization error minimization. Further, with this learning objective, code balancing can be achieved by simply using a Batch Normalization (BN) layer, and multi-label classification is also straightforward with label smoothing. The result is a one-loss deep hashing model that removes all the hassles of tuning the weights of various losses. Importantly, extensive experiments show that our model is highly effective, outperforming the state-of-the-art multi-loss hashing models on three large-scale instance retrieval benchmarks, often by significant margins.},
  url       = {https://proceedings.neurips.cc/paper/2021/hash/cbcb58ac2e496207586df2854b17995f-Abstract.html},
}
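The single-objective idea above can be illustrated with a short PyTorch sketch: continuous codes pass through a BatchNorm layer and are trained to have high cosine similarity with fixed binary target codes through one cross-entropy loss. This is a simplified, hypothetical sketch (the layer names, randomly sampled target codes, scaling factor, and smoothing value are illustrative assumptions), not the actual OrthoHash implementation.

```python
# Hypothetical sketch of a one-loss deep hashing head, based on the abstract above.
# Names (OneLossHashHead, nbits, scale) are illustrative, not from the paper's code.
import torch
import torch.nn as nn
import torch.nn.functional as F


class OneLossHashHead(nn.Module):
    def __init__(self, feat_dim, nbits, num_classes, scale=8.0):
        super().__init__()
        self.fc = nn.Linear(feat_dim, nbits)   # produces continuous codes
        self.bn = nn.BatchNorm1d(nbits)        # code balancing via BN, per the abstract
        # Fixed binary (+1/-1) target codes, one per class; sampled randomly here,
        # though orthogonal constructions (e.g. Hadamard-based) could be used instead.
        targets = torch.randint(0, 2, (num_classes, nbits)).float() * 2 - 1
        self.register_buffer("targets", targets)
        self.scale = scale

    def forward(self, features, labels):
        codes = self.bn(self.fc(features))     # continuous hash codes
        # Cosine similarity between each continuous code and every class target code.
        logits = F.normalize(codes, dim=1) @ F.normalize(self.targets, dim=1).t()
        # One cross-entropy loss over the scaled cosine similarities; label smoothing
        # follows the abstract (a multi-label setup would use soft targets instead).
        loss = F.cross_entropy(self.scale * logits, labels, label_smoothing=0.1)
        return loss, codes.sign()              # sign() gives the binary codes for retrieval
```

At retrieval time, the binary codes from `codes.sign()` would be compared with Hamming distance, as is standard in hashing-based retrieval.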